file_name
stringlengths 3
137
| prefix
stringlengths 0
918k
| suffix
stringlengths 0
962k
| middle
stringlengths 0
812k
|
---|---|---|---|
input.rs | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
use std::fmt::Write;
/// See [`DescribeRecommendationExportJobsInput`](crate::input::DescribeRecommendationExportJobsInput)
pub mod describe_recommendation_export_jobs_input {
/// A builder for [`DescribeRecommendationExportJobsInput`](crate::input::DescribeRecommendationExportJobsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) job_ids: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) filters: std::option::Option<std::vec::Vec<crate::model::JobFilter>>,
pub(crate) next_token: std::option::Option<std::string::String>,
pub(crate) max_results: std::option::Option<i32>,
}
impl Builder {
/// Appends an item to `job_ids`.
///
/// To override the contents of this collection use [`set_job_ids`](Self::set_job_ids).
///
/// <p>The identification numbers of the export jobs to return.</p>
///
/// <p>An export job ID is returned when you create an export using the <a>ExportAutoScalingGroupRecommendations</a> or <a>ExportEC2InstanceRecommendations</a> actions.</p>
///
/// <p>All export jobs created in the last seven days are returned if this parameter is
/// omitted.</p>
pub fn job_ids(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.job_ids.unwrap_or_default();
v.push(input.into());
self.job_ids = Some(v);
self
}
/// <p>The identification numbers of the export jobs to return.</p>
///
/// <p>An export job ID is returned when you create an export using the <a>ExportAutoScalingGroupRecommendations</a> or <a>ExportEC2InstanceRecommendations</a> actions.</p>
///
/// <p>All export jobs created in the last seven days are returned if this parameter is
/// omitted.</p>
pub fn set_job_ids(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.job_ids = input;
self
}
/// Appends an item to `filters`.
///
/// To override the contents of this collection use [`set_filters`](Self::set_filters).
///
/// <p>An array of objects to specify a filter that returns a more specific list of export
/// jobs.</p>
pub fn filters(mut self, input: impl Into<crate::model::JobFilter>) -> Self {
let mut v = self.filters.unwrap_or_default();
v.push(input.into());
self.filters = Some(v);
self
}
/// <p>An array of objects to specify a filter that returns a more specific list of export
/// jobs.</p>
pub fn set_filters(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::JobFilter>>,
) -> Self {
self.filters = input;
self
}
/// <p>The token to advance to the next page of export jobs.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
/// <p>The token to advance to the next page of export jobs.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
/// <p>The maximum number of export jobs to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn max_results(mut self, input: i32) -> Self {
self.max_results = Some(input);
self
}
/// <p>The maximum number of export jobs to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.max_results = input;
self
}
/// Consumes the builder and constructs a [`DescribeRecommendationExportJobsInput`](crate::input::DescribeRecommendationExportJobsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::DescribeRecommendationExportJobsInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::DescribeRecommendationExportJobsInput {
job_ids: self.job_ids,
filters: self.filters,
next_token: self.next_token,
max_results: self.max_results,
})
}
}
}
#[doc(hidden)]
pub type DescribeRecommendationExportJobsInputOperationOutputAlias =
crate::operation::DescribeRecommendationExportJobs;
#[doc(hidden)]
pub type DescribeRecommendationExportJobsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl DescribeRecommendationExportJobsInput {
/// Consumes the builder and constructs an Operation<[`DescribeRecommendationExportJobs`](crate::operation::DescribeRecommendationExportJobs)>
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::DescribeRecommendationExportJobs,
aws_http::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
fn uri_base(
_input: &crate::input::DescribeRecommendationExportJobsInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::DescribeRecommendationExportJobsInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
input: &crate::input::DescribeRecommendationExportJobsInput,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
#[allow(unused_mut)]
let mut builder = update_http_builder(input, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("content-type"),
"application/x-amz-json-1.0",
);
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"ComputeOptimizerService.DescribeRecommendationExportJobs",
);
Ok(builder)
}
let properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
let request = request_builder_base(&self)?;
let body =
crate::operation_ser::serialize_operation_crate_operation_describe_recommendation_export_jobs(&self)?
;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request = aws_smithy_http::operation::Request::from_parts(
request.map(aws_smithy_http::body::SdkBody::from),
properties,
);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::DescribeRecommendationExportJobs::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"DescribeRecommendationExportJobs",
"computeoptimizer",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
Ok(op)
}
fn assemble(
builder: http::request::Builder,
body: aws_smithy_http::body::SdkBody,
) -> http::request::Request<aws_smithy_http::body::SdkBody> {
let mut builder = builder;
if let Some(content_length) = body.content_length() {
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::CONTENT_LENGTH,
content_length,
);
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`DescribeRecommendationExportJobsInput`](crate::input::DescribeRecommendationExportJobsInput)
pub fn builder() -> crate::input::describe_recommendation_export_jobs_input::Builder {
crate::input::describe_recommendation_export_jobs_input::Builder::default()
}
}
/// See [`ExportAutoScalingGroupRecommendationsInput`](crate::input::ExportAutoScalingGroupRecommendationsInput)
pub mod export_auto_scaling_group_recommendations_input {
/// A builder for [`ExportAutoScalingGroupRecommendationsInput`](crate::input::ExportAutoScalingGroupRecommendationsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) account_ids: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) filters: std::option::Option<std::vec::Vec<crate::model::Filter>>,
pub(crate) fields_to_export:
std::option::Option<std::vec::Vec<crate::model::ExportableAutoScalingGroupField>>,
pub(crate) s3_destination_config: std::option::Option<crate::model::S3DestinationConfig>,
pub(crate) file_format: std::option::Option<crate::model::FileFormat>,
pub(crate) include_member_accounts: std::option::Option<bool>,
pub(crate) recommendation_preferences:
std::option::Option<crate::model::RecommendationPreferences>,
}
impl Builder {
/// Appends an item to `account_ids`.
///
/// To override the contents of this collection use [`set_account_ids`](Self::set_account_ids).
///
/// <p>The IDs of the Amazon Web Services accounts for which to export Auto Scaling group
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to export recommendations.</p>
///
/// <p>This parameter cannot be specified together with the include member accounts
/// parameter. The parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the include member accounts parameter, is omitted.</p>
///
/// <p>You can specify multiple account IDs per request.</p>
pub fn account_ids(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.account_ids.unwrap_or_default();
v.push(input.into());
self.account_ids = Some(v);
self
}
/// <p>The IDs of the Amazon Web Services accounts for which to export Auto Scaling group
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to export recommendations.</p>
///
/// <p>This parameter cannot be specified together with the include member accounts
/// parameter. The parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the include member accounts parameter, is omitted.</p>
///
/// <p>You can specify multiple account IDs per request.</p>
pub fn set_account_ids(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.account_ids = input;
self
}
/// Appends an item to `filters`.
///
/// To override the contents of this collection use [`set_filters`](Self::set_filters).
///
/// <p>An array of objects to specify a filter that exports a more specific set of Auto Scaling group recommendations.</p>
pub fn filters(mut self, input: impl Into<crate::model::Filter>) -> Self {
let mut v = self.filters.unwrap_or_default();
v.push(input.into());
self.filters = Some(v);
self
}
/// <p>An array of objects to specify a filter that exports a more specific set of Auto Scaling group recommendations.</p>
pub fn set_filters(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Filter>>,
) -> Self {
self.filters = input;
self
}
/// Appends an item to `fields_to_export`.
///
/// To override the contents of this collection use [`set_fields_to_export`](Self::set_fields_to_export).
///
/// <p>The recommendations data to include in the export file. For more information about the
/// fields that can be exported, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/exporting-recommendations.html#exported-files">Exported files</a> in the <i>Compute Optimizer User
/// Guide</i>.</p>
pub fn fields_to_export(
mut self,
input: impl Into<crate::model::ExportableAutoScalingGroupField>,
) -> Self {
let mut v = self.fields_to_export.unwrap_or_default();
v.push(input.into());
self.fields_to_export = Some(v);
self
}
/// <p>The recommendations data to include in the export file. For more information about the
/// fields that can be exported, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/exporting-recommendations.html#exported-files">Exported files</a> in the <i>Compute Optimizer User
/// Guide</i>.</p>
pub fn set_fields_to_export(
mut self,
input: std::option::Option<
std::vec::Vec<crate::model::ExportableAutoScalingGroupField>,
>,
) -> Self {
self.fields_to_export = input;
self
}
/// <p>An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket
/// name and key prefix for the export job.</p>
///
/// <p>You must create the destination Amazon S3 bucket for your recommendations
/// export before you create the export job. Compute Optimizer does not create the S3 bucket
/// for you. After you create the S3 bucket, ensure that it has the required permissions
/// policy to allow Compute Optimizer to write the export file to it. If you plan to
/// specify an object prefix when you create the export job, you must include the object
/// prefix in the policy that you add to the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/create-s3-bucket-policy-for-compute-optimizer.html">Amazon S3 Bucket Policy for Compute Optimizer</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
pub fn s3_destination_config(mut self, input: crate::model::S3DestinationConfig) -> Self {
self.s3_destination_config = Some(input);
self
}
/// <p>An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket
/// name and key prefix for the export job.</p>
///
/// <p>You must create the destination Amazon S3 bucket for your recommendations
/// export before you create the export job. Compute Optimizer does not create the S3 bucket
/// for you. After you create the S3 bucket, ensure that it has the required permissions
/// policy to allow Compute Optimizer to write the export file to it. If you plan to
/// specify an object prefix when you create the export job, you must include the object
/// prefix in the policy that you add to the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/create-s3-bucket-policy-for-compute-optimizer.html">Amazon S3 Bucket Policy for Compute Optimizer</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
pub fn set_s3_destination_config(
mut self,
input: std::option::Option<crate::model::S3DestinationConfig>,
) -> Self {
self.s3_destination_config = input;
self
}
/// <p>The format of the export file.</p>
///
/// <p>The only export file format currently supported is <code>Csv</code>.</p>
pub fn file_format(mut self, input: crate::model::FileFormat) -> Self {
self.file_format = Some(input);
self
}
/// <p>The format of the export file.</p>
///
/// <p>The only export file format currently supported is <code>Csv</code>.</p>
pub fn set_file_format(
mut self,
input: std::option::Option<crate::model::FileFormat>,
) -> Self {
self.file_format = input;
self
}
/// <p>Indicates whether to include recommendations for resources in all member accounts of
/// the organization if your account is the management account of an organization.</p>
///
/// <p>The member accounts must also be opted in to Compute Optimizer, and trusted access for
/// Compute Optimizer must be enabled in the organization account. For more information,
/// see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/security-iam.html#trusted-service-access">Compute Optimizer and Amazon Web Services Organizations trusted access</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
///
/// <p>Recommendations for member accounts of the organization are not included in the export
/// file if this parameter is omitted.</p>
///
/// <p>This parameter cannot be specified together with the account IDs parameter. The
/// parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the account IDs parameter, is omitted.</p>
pub fn include_member_accounts(mut self, input: bool) -> Self {
self.include_member_accounts = Some(input);
self
}
/// <p>Indicates whether to include recommendations for resources in all member accounts of
/// the organization if your account is the management account of an organization.</p>
///
/// <p>The member accounts must also be opted in to Compute Optimizer, and trusted access for
/// Compute Optimizer must be enabled in the organization account. For more information,
/// see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/security-iam.html#trusted-service-access">Compute Optimizer and Amazon Web Services Organizations trusted access</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
///
/// <p>Recommendations for member accounts of the organization are not included in the export
/// file if this parameter is omitted.</p>
///
/// <p>This parameter cannot be specified together with the account IDs parameter. The
/// parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the account IDs parameter, is omitted.</p>
pub fn set_include_member_accounts(mut self, input: std::option::Option<bool>) -> Self {
self.include_member_accounts = input;
self
}
/// <p>An object to specify the preferences for the Auto Scaling group recommendations
/// to export.</p>
pub fn recommendation_preferences(
mut self,
input: crate::model::RecommendationPreferences,
) -> Self {
self.recommendation_preferences = Some(input);
self
}
/// <p>An object to specify the preferences for the Auto Scaling group recommendations
/// to export.</p>
pub fn set_recommendation_preferences(
mut self,
input: std::option::Option<crate::model::RecommendationPreferences>,
) -> Self {
self.recommendation_preferences = input;
self
}
/// Consumes the builder and constructs a [`ExportAutoScalingGroupRecommendationsInput`](crate::input::ExportAutoScalingGroupRecommendationsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::ExportAutoScalingGroupRecommendationsInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::ExportAutoScalingGroupRecommendationsInput {
account_ids: self.account_ids,
filters: self.filters,
fields_to_export: self.fields_to_export,
s3_destination_config: self.s3_destination_config,
file_format: self.file_format,
include_member_accounts: self.include_member_accounts.unwrap_or_default(),
recommendation_preferences: self.recommendation_preferences,
})
}
}
}
#[doc(hidden)]
pub type ExportAutoScalingGroupRecommendationsInputOperationOutputAlias =
crate::operation::ExportAutoScalingGroupRecommendations;
#[doc(hidden)]
pub type ExportAutoScalingGroupRecommendationsInputOperationRetryAlias =
aws_http::AwsErrorRetryPolicy;
impl ExportAutoScalingGroupRecommendationsInput {
/// Consumes the builder and constructs an Operation<[`ExportAutoScalingGroupRecommendations`](crate::operation::ExportAutoScalingGroupRecommendations)>
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::ExportAutoScalingGroupRecommendations,
aws_http::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
fn uri_base(
_input: &crate::input::ExportAutoScalingGroupRecommendationsInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::ExportAutoScalingGroupRecommendationsInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
input: &crate::input::ExportAutoScalingGroupRecommendationsInput,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
#[allow(unused_mut)]
let mut builder = update_http_builder(input, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("content-type"),
"application/x-amz-json-1.0",
);
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"ComputeOptimizerService.ExportAutoScalingGroupRecommendations",
);
Ok(builder)
}
let properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
let request = request_builder_base(&self)?;
let body =
crate::operation_ser::serialize_operation_crate_operation_export_auto_scaling_group_recommendations(&self)?
;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request = aws_smithy_http::operation::Request::from_parts(
request.map(aws_smithy_http::body::SdkBody::from),
properties,
);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::ExportAutoScalingGroupRecommendations::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"ExportAutoScalingGroupRecommendations",
"computeoptimizer",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
Ok(op)
}
fn assemble(
builder: http::request::Builder,
body: aws_smithy_http::body::SdkBody,
) -> http::request::Request<aws_smithy_http::body::SdkBody> {
let mut builder = builder;
if let Some(content_length) = body.content_length() {
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::CONTENT_LENGTH,
content_length,
);
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`ExportAutoScalingGroupRecommendationsInput`](crate::input::ExportAutoScalingGroupRecommendationsInput)
pub fn builder() -> crate::input::export_auto_scaling_group_recommendations_input::Builder {
crate::input::export_auto_scaling_group_recommendations_input::Builder::default()
}
}
/// See [`ExportEbsVolumeRecommendationsInput`](crate::input::ExportEbsVolumeRecommendationsInput)
pub mod export_ebs_volume_recommendations_input {
/// A builder for [`ExportEbsVolumeRecommendationsInput`](crate::input::ExportEbsVolumeRecommendationsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) account_ids: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) filters: std::option::Option<std::vec::Vec<crate::model::EbsFilter>>,
pub(crate) fields_to_export:
std::option::Option<std::vec::Vec<crate::model::ExportableVolumeField>>,
pub(crate) s3_destination_config: std::option::Option<crate::model::S3DestinationConfig>,
pub(crate) file_format: std::option::Option<crate::model::FileFormat>,
pub(crate) include_member_accounts: std::option::Option<bool>,
}
impl Builder {
/// Appends an item to `account_ids`.
///
/// To override the contents of this collection use [`set_account_ids`](Self::set_account_ids).
///
/// <p>The IDs of the Amazon Web Services accounts for which to export Amazon EBS
/// volume recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to export recommendations.</p>
///
/// <p>This parameter cannot be specified together with the include member accounts
/// parameter. The parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the include member accounts parameter, is omitted.</p>
///
/// <p>You can specify multiple account IDs per request.</p>
pub fn account_ids(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.account_ids.unwrap_or_default();
v.push(input.into());
self.account_ids = Some(v);
self
}
/// <p>The IDs of the Amazon Web Services accounts for which to export Amazon EBS
/// volume recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to export recommendations.</p>
///
/// <p>This parameter cannot be specified together with the include member accounts
/// parameter. The parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the include member accounts parameter, is omitted.</p>
///
/// <p>You can specify multiple account IDs per request.</p>
pub fn set_account_ids(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.account_ids = input;
self
}
/// Appends an item to `filters`.
///
/// To override the contents of this collection use [`set_filters`](Self::set_filters).
///
/// <p>An array of objects to specify a filter that exports a more specific set of Amazon EBS volume recommendations.</p>
pub fn filters(mut self, input: impl Into<crate::model::EbsFilter>) -> Self {
let mut v = self.filters.unwrap_or_default();
v.push(input.into());
self.filters = Some(v);
self
}
/// <p>An array of objects to specify a filter that exports a more specific set of Amazon EBS volume recommendations.</p>
pub fn set_filters(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::EbsFilter>>,
) -> Self {
self.filters = input;
self
}
/// Appends an item to `fields_to_export`.
///
/// To override the contents of this collection use [`set_fields_to_export`](Self::set_fields_to_export).
///
/// <p>The recommendations data to include in the export file. For more information about the
/// fields that can be exported, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/exporting-recommendations.html#exported-files">Exported files</a> in the <i>Compute Optimizer User
/// Guide</i>.</p>
pub fn fields_to_export(
mut self,
input: impl Into<crate::model::ExportableVolumeField>,
) -> Self {
let mut v = self.fields_to_export.unwrap_or_default();
v.push(input.into());
self.fields_to_export = Some(v);
self
}
/// <p>The recommendations data to include in the export file. For more information about the
/// fields that can be exported, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/exporting-recommendations.html#exported-files">Exported files</a> in the <i>Compute Optimizer User
/// Guide</i>.</p>
pub fn set_fields_to_export(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ExportableVolumeField>>,
) -> Self {
self.fields_to_export = input;
self
}
/// <p>Describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and
/// key prefix for a recommendations export job.</p>
///
/// <p>You must create the destination Amazon S3 bucket for your recommendations
/// export before you create the export job. Compute Optimizer does not create the S3 bucket
/// for you. After you create the S3 bucket, ensure that it has the required permission
/// policy to allow Compute Optimizer to write the export file to it. If you plan to specify
/// an object prefix when you create the export job, you must include the object prefix in
/// the policy that you add to the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/create-s3-bucket-policy-for-compute-optimizer.html">Amazon S3 Bucket Policy for Compute Optimizer</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
pub fn s3_destination_config(mut self, input: crate::model::S3DestinationConfig) -> Self {
self.s3_destination_config = Some(input);
self
}
/// <p>Describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and
/// key prefix for a recommendations export job.</p>
///
/// <p>You must create the destination Amazon S3 bucket for your recommendations
/// export before you create the export job. Compute Optimizer does not create the S3 bucket
/// for you. After you create the S3 bucket, ensure that it has the required permission
/// policy to allow Compute Optimizer to write the export file to it. If you plan to specify
/// an object prefix when you create the export job, you must include the object prefix in
/// the policy that you add to the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/create-s3-bucket-policy-for-compute-optimizer.html">Amazon S3 Bucket Policy for Compute Optimizer</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
pub fn set_s3_destination_config(
mut self,
input: std::option::Option<crate::model::S3DestinationConfig>,
) -> Self {
self.s3_destination_config = input;
self
}
/// <p>The format of the export file.</p>
///
/// <p>The only export file format currently supported is <code>Csv</code>.</p>
pub fn file_format(mut self, input: crate::model::FileFormat) -> Self {
self.file_format = Some(input);
self
}
/// <p>The format of the export file.</p>
///
/// <p>The only export file format currently supported is <code>Csv</code>.</p>
pub fn set_file_format(
mut self,
input: std::option::Option<crate::model::FileFormat>,
) -> Self {
self.file_format = input;
self
}
/// <p>Indicates whether to include recommendations for resources in all member accounts of
/// the organization if your account is the management account of an organization.</p>
///
/// <p>The member accounts must also be opted in to Compute Optimizer, and trusted access for
/// Compute Optimizer must be enabled in the organization account. For more information,
/// see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/security-iam.html#trusted-service-access">Compute Optimizer and Amazon Web Services Organizations trusted access</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
///
/// <p>Recommendations for member accounts of the organization are not included in the export
/// file if this parameter is omitted.</p>
///
/// <p>This parameter cannot be specified together with the account IDs parameter. The
/// parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the account IDs parameter, is omitted.</p>
pub fn include_member_accounts(mut self, input: bool) -> Self {
self.include_member_accounts = Some(input);
self
}
/// <p>Indicates whether to include recommendations for resources in all member accounts of
/// the organization if your account is the management account of an organization.</p>
///
/// <p>The member accounts must also be opted in to Compute Optimizer, and trusted access for
/// Compute Optimizer must be enabled in the organization account. For more information,
/// see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/security-iam.html#trusted-service-access">Compute Optimizer and Amazon Web Services Organizations trusted access</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
///
/// <p>Recommendations for member accounts of the organization are not included in the export
/// file if this parameter is omitted.</p>
///
/// <p>This parameter cannot be specified together with the account IDs parameter. The
/// parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the account IDs parameter, is omitted.</p>
pub fn set_include_member_accounts(mut self, input: std::option::Option<bool>) -> Self {
self.include_member_accounts = input;
self
}
/// Consumes the builder and constructs a [`ExportEbsVolumeRecommendationsInput`](crate::input::ExportEbsVolumeRecommendationsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::ExportEbsVolumeRecommendationsInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::ExportEbsVolumeRecommendationsInput {
account_ids: self.account_ids,
filters: self.filters,
fields_to_export: self.fields_to_export,
s3_destination_config: self.s3_destination_config,
file_format: self.file_format,
include_member_accounts: self.include_member_accounts.unwrap_or_default(),
})
}
}
}
#[doc(hidden)]
pub type ExportEbsVolumeRecommendationsInputOperationOutputAlias =
crate::operation::ExportEBSVolumeRecommendations;
#[doc(hidden)]
pub type ExportEbsVolumeRecommendationsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl ExportEbsVolumeRecommendationsInput {
/// Consumes the builder and constructs an Operation<[`ExportEBSVolumeRecommendations`](crate::operation::ExportEBSVolumeRecommendations)>
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::ExportEBSVolumeRecommendations,
aws_http::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
fn uri_base(
_input: &crate::input::ExportEbsVolumeRecommendationsInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::ExportEbsVolumeRecommendationsInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
input: &crate::input::ExportEbsVolumeRecommendationsInput,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
#[allow(unused_mut)]
let mut builder = update_http_builder(input, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("content-type"),
"application/x-amz-json-1.0",
);
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"ComputeOptimizerService.ExportEBSVolumeRecommendations",
);
Ok(builder)
}
let properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
let request = request_builder_base(&self)?;
let body =
crate::operation_ser::serialize_operation_crate_operation_export_ebs_volume_recommendations(&self)?
;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request = aws_smithy_http::operation::Request::from_parts(
request.map(aws_smithy_http::body::SdkBody::from),
properties,
);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::ExportEBSVolumeRecommendations::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"ExportEBSVolumeRecommendations",
"computeoptimizer",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
Ok(op)
}
fn assemble(
builder: http::request::Builder,
body: aws_smithy_http::body::SdkBody,
) -> http::request::Request<aws_smithy_http::body::SdkBody> {
let mut builder = builder;
if let Some(content_length) = body.content_length() {
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::CONTENT_LENGTH,
content_length,
);
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`ExportEbsVolumeRecommendationsInput`](crate::input::ExportEbsVolumeRecommendationsInput)
pub fn builder() -> crate::input::export_ebs_volume_recommendations_input::Builder {
crate::input::export_ebs_volume_recommendations_input::Builder::default()
}
}
/// See [`ExportEc2InstanceRecommendationsInput`](crate::input::ExportEc2InstanceRecommendationsInput)
pub mod export_ec2_instance_recommendations_input {
/// A builder for [`ExportEc2InstanceRecommendationsInput`](crate::input::ExportEc2InstanceRecommendationsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) account_ids: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) filters: std::option::Option<std::vec::Vec<crate::model::Filter>>,
pub(crate) fields_to_export:
std::option::Option<std::vec::Vec<crate::model::ExportableInstanceField>>,
pub(crate) s3_destination_config: std::option::Option<crate::model::S3DestinationConfig>,
pub(crate) file_format: std::option::Option<crate::model::FileFormat>,
pub(crate) include_member_accounts: std::option::Option<bool>,
pub(crate) recommendation_preferences:
std::option::Option<crate::model::RecommendationPreferences>,
}
impl Builder {
/// Appends an item to `account_ids`.
///
/// To override the contents of this collection use [`set_account_ids`](Self::set_account_ids).
///
/// <p>The IDs of the Amazon Web Services accounts for which to export instance
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to export recommendations.</p>
///
/// <p>This parameter cannot be specified together with the include member accounts
/// parameter. The parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the include member accounts parameter, is omitted.</p>
///
/// <p>You can specify multiple account IDs per request.</p>
pub fn account_ids(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.account_ids.unwrap_or_default();
v.push(input.into());
self.account_ids = Some(v);
self
}
/// <p>The IDs of the Amazon Web Services accounts for which to export instance
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to export recommendations.</p>
///
/// <p>This parameter cannot be specified together with the include member accounts
/// parameter. The parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the include member accounts parameter, is omitted.</p>
///
/// <p>You can specify multiple account IDs per request.</p>
pub fn set_account_ids(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.account_ids = input;
self
}
/// Appends an item to `filters`.
///
/// To override the contents of this collection use [`set_filters`](Self::set_filters).
///
/// <p>An array of objects to specify a filter that exports a more specific set of instance
/// recommendations.</p>
pub fn filters(mut self, input: impl Into<crate::model::Filter>) -> Self {
let mut v = self.filters.unwrap_or_default();
v.push(input.into());
self.filters = Some(v);
self
}
/// <p>An array of objects to specify a filter that exports a more specific set of instance
/// recommendations.</p>
pub fn set_filters(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Filter>>,
) -> Self {
self.filters = input;
self
}
/// Appends an item to `fields_to_export`.
///
/// To override the contents of this collection use [`set_fields_to_export`](Self::set_fields_to_export).
///
/// <p>The recommendations data to include in the export file. For more information about the
/// fields that can be exported, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/exporting-recommendations.html#exported-files">Exported files</a> in the <i>Compute Optimizer User
/// Guide</i>.</p>
pub fn fields_to_export(
mut self,
input: impl Into<crate::model::ExportableInstanceField>,
) -> Self {
let mut v = self.fields_to_export.unwrap_or_default();
v.push(input.into());
self.fields_to_export = Some(v);
self
}
/// <p>The recommendations data to include in the export file. For more information about the
/// fields that can be exported, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/exporting-recommendations.html#exported-files">Exported files</a> in the <i>Compute Optimizer User
/// Guide</i>.</p>
pub fn set_fields_to_export(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ExportableInstanceField>>,
) -> Self {
self.fields_to_export = input;
self
}
/// <p>An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket
/// name and key prefix for the export job.</p>
///
/// <p>You must create the destination Amazon S3 bucket for your recommendations
/// export before you create the export job. Compute Optimizer does not create the S3 bucket
/// for you. After you create the S3 bucket, ensure that it has the required permissions
/// policy policy to allow Compute Optimizer to write the export file to it. If you plan to
/// specify an object prefix when you create the export job, you must include the object
/// prefix in the that you add to the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/create-s3-bucket-policy-for-compute-optimizer.html">Amazon S3 Bucket Policy for Compute Optimizer</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
pub fn s3_destination_config(mut self, input: crate::model::S3DestinationConfig) -> Self {
self.s3_destination_config = Some(input);
self
}
/// <p>An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket
/// name and key prefix for the export job.</p>
///
/// <p>You must create the destination Amazon S3 bucket for your recommendations
/// export before you create the export job. Compute Optimizer does not create the S3 bucket
/// for you. After you create the S3 bucket, ensure that it has the required permissions
/// policy policy to allow Compute Optimizer to write the export file to it. If you plan to
/// specify an object prefix when you create the export job, you must include the object
/// prefix in the that you add to the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/create-s3-bucket-policy-for-compute-optimizer.html">Amazon S3 Bucket Policy for Compute Optimizer</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
pub fn set_s3_destination_config(
mut self,
input: std::option::Option<crate::model::S3DestinationConfig>,
) -> Self {
self.s3_destination_config = input;
self
}
/// <p>The format of the export file.</p>
///
/// <p>The only export file format currently supported is <code>Csv</code>.</p>
pub fn file_format(mut self, input: crate::model::FileFormat) -> Self {
self.file_format = Some(input);
self
}
/// <p>The format of the export file.</p>
///
/// <p>The only export file format currently supported is <code>Csv</code>.</p>
pub fn set_file_format(
mut self,
input: std::option::Option<crate::model::FileFormat>,
) -> Self {
self.file_format = input;
self
}
/// <p>Indicates whether to include recommendations for resources in all member accounts of
/// the organization if your account is the management account of an organization.</p>
///
/// <p>The member accounts must also be opted in to Compute Optimizer, and trusted access for
/// Compute Optimizer must be enabled in the organization account. For more information,
/// see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/security-iam.html#trusted-service-access">Compute Optimizer and Amazon Web Services Organizations trusted access</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
///
/// <p>Recommendations for member accounts of the organization are not included in the export
/// file if this parameter is omitted.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the account IDs parameter, is omitted.</p>
pub fn include_member_accounts(mut self, input: bool) -> Self {
self.include_member_accounts = Some(input);
self
}
/// <p>Indicates whether to include recommendations for resources in all member accounts of
/// the organization if your account is the management account of an organization.</p>
///
/// <p>The member accounts must also be opted in to Compute Optimizer, and trusted access for
/// Compute Optimizer must be enabled in the organization account. For more information,
/// see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/security-iam.html#trusted-service-access">Compute Optimizer and Amazon Web Services Organizations trusted access</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
///
/// <p>Recommendations for member accounts of the organization are not included in the export
/// file if this parameter is omitted.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the account IDs parameter, is omitted.</p>
pub fn set_include_member_accounts(mut self, input: std::option::Option<bool>) -> Self {
self.include_member_accounts = input;
self
}
/// <p>An object to specify the preferences for the Amazon EC2 instance
/// recommendations to export.</p>
pub fn recommendation_preferences(
mut self,
input: crate::model::RecommendationPreferences,
) -> Self {
self.recommendation_preferences = Some(input);
self
}
/// <p>An object to specify the preferences for the Amazon EC2 instance
/// recommendations to export.</p>
pub fn set_recommendation_preferences(
mut self,
input: std::option::Option<crate::model::RecommendationPreferences>,
) -> Self {
self.recommendation_preferences = input;
self
}
/// Consumes the builder and constructs a [`ExportEc2InstanceRecommendationsInput`](crate::input::ExportEc2InstanceRecommendationsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::ExportEc2InstanceRecommendationsInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::ExportEc2InstanceRecommendationsInput {
account_ids: self.account_ids,
filters: self.filters,
fields_to_export: self.fields_to_export,
s3_destination_config: self.s3_destination_config,
file_format: self.file_format,
include_member_accounts: self.include_member_accounts.unwrap_or_default(),
recommendation_preferences: self.recommendation_preferences,
})
}
}
}
#[doc(hidden)]
pub type ExportEc2InstanceRecommendationsInputOperationOutputAlias =
crate::operation::ExportEC2InstanceRecommendations;
#[doc(hidden)]
pub type ExportEc2InstanceRecommendationsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl ExportEc2InstanceRecommendationsInput {
/// Consumes the builder and constructs an Operation<[`ExportEC2InstanceRecommendations`](crate::operation::ExportEC2InstanceRecommendations)>
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::ExportEC2InstanceRecommendations,
aws_http::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
fn uri_base(
_input: &crate::input::ExportEc2InstanceRecommendationsInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::ExportEc2InstanceRecommendationsInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
input: &crate::input::ExportEc2InstanceRecommendationsInput,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
#[allow(unused_mut)]
let mut builder = update_http_builder(input, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("content-type"),
"application/x-amz-json-1.0",
);
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"ComputeOptimizerService.ExportEC2InstanceRecommendations",
);
Ok(builder)
}
let properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
let request = request_builder_base(&self)?;
let body =
crate::operation_ser::serialize_operation_crate_operation_export_ec2_instance_recommendations(&self)?
;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request = aws_smithy_http::operation::Request::from_parts(
request.map(aws_smithy_http::body::SdkBody::from),
properties,
);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::ExportEC2InstanceRecommendations::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"ExportEC2InstanceRecommendations",
"computeoptimizer",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
Ok(op)
}
fn assemble(
builder: http::request::Builder,
body: aws_smithy_http::body::SdkBody,
) -> http::request::Request<aws_smithy_http::body::SdkBody> {
let mut builder = builder;
if let Some(content_length) = body.content_length() {
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::CONTENT_LENGTH,
content_length,
);
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`ExportEc2InstanceRecommendationsInput`](crate::input::ExportEc2InstanceRecommendationsInput)
pub fn builder() -> crate::input::export_ec2_instance_recommendations_input::Builder {
crate::input::export_ec2_instance_recommendations_input::Builder::default()
}
}
/// See [`ExportLambdaFunctionRecommendationsInput`](crate::input::ExportLambdaFunctionRecommendationsInput)
pub mod export_lambda_function_recommendations_input {
/// A builder for [`ExportLambdaFunctionRecommendationsInput`](crate::input::ExportLambdaFunctionRecommendationsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) account_ids: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) filters:
std::option::Option<std::vec::Vec<crate::model::LambdaFunctionRecommendationFilter>>,
pub(crate) fields_to_export:
std::option::Option<std::vec::Vec<crate::model::ExportableLambdaFunctionField>>,
pub(crate) s3_destination_config: std::option::Option<crate::model::S3DestinationConfig>,
pub(crate) file_format: std::option::Option<crate::model::FileFormat>,
pub(crate) include_member_accounts: std::option::Option<bool>,
}
impl Builder {
/// Appends an item to `account_ids`.
///
/// To override the contents of this collection use [`set_account_ids`](Self::set_account_ids).
///
/// <p>The IDs of the Amazon Web Services accounts for which to export Lambda
/// function recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to export recommendations.</p>
///
/// <p>This parameter cannot be specified together with the include member accounts
/// parameter. The parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the include member accounts parameter, is omitted.</p>
///
/// <p>You can specify multiple account IDs per request.</p>
pub fn account_ids(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.account_ids.unwrap_or_default();
v.push(input.into());
self.account_ids = Some(v);
self
}
/// <p>The IDs of the Amazon Web Services accounts for which to export Lambda
/// function recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to export recommendations.</p>
///
/// <p>This parameter cannot be specified together with the include member accounts
/// parameter. The parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the include member accounts parameter, is omitted.</p>
///
/// <p>You can specify multiple account IDs per request.</p>
pub fn set_account_ids(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.account_ids = input;
self
}
/// Appends an item to `filters`.
///
/// To override the contents of this collection use [`set_filters`](Self::set_filters).
///
/// <p>An array of objects to specify a filter that exports a more specific set of Lambda function recommendations.</p>
pub fn filters(
mut self,
input: impl Into<crate::model::LambdaFunctionRecommendationFilter>,
) -> Self {
let mut v = self.filters.unwrap_or_default();
v.push(input.into());
self.filters = Some(v);
self
}
/// <p>An array of objects to specify a filter that exports a more specific set of Lambda function recommendations.</p>
pub fn set_filters(
mut self,
input: std::option::Option<
std::vec::Vec<crate::model::LambdaFunctionRecommendationFilter>,
>,
) -> Self {
self.filters = input;
self
}
/// Appends an item to `fields_to_export`.
///
/// To override the contents of this collection use [`set_fields_to_export`](Self::set_fields_to_export).
///
/// <p>The recommendations data to include in the export file. For more information about the
/// fields that can be exported, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/exporting-recommendations.html#exported-files">Exported files</a> in the <i>Compute Optimizer User
/// Guide</i>.</p>
pub fn fields_to_export(
mut self,
input: impl Into<crate::model::ExportableLambdaFunctionField>,
) -> Self {
let mut v = self.fields_to_export.unwrap_or_default();
v.push(input.into());
self.fields_to_export = Some(v);
self
}
/// <p>The recommendations data to include in the export file. For more information about the
/// fields that can be exported, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/exporting-recommendations.html#exported-files">Exported files</a> in the <i>Compute Optimizer User
/// Guide</i>.</p>
pub fn set_fields_to_export(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ExportableLambdaFunctionField>>,
) -> Self {
self.fields_to_export = input;
self
}
/// <p>Describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and
/// key prefix for a recommendations export job.</p>
///
/// <p>You must create the destination Amazon S3 bucket for your recommendations
/// export before you create the export job. Compute Optimizer does not create the S3 bucket
/// for you. After you create the S3 bucket, ensure that it has the required permission
/// policy to allow Compute Optimizer to write the export file to it. If you plan to specify
/// an object prefix when you create the export job, you must include the object prefix in
/// the policy that you add to the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/create-s3-bucket-policy-for-compute-optimizer.html">Amazon S3 Bucket Policy for Compute Optimizer</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
pub fn s3_destination_config(mut self, input: crate::model::S3DestinationConfig) -> Self {
self.s3_destination_config = Some(input);
self
}
/// <p>Describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and
/// key prefix for a recommendations export job.</p>
///
/// <p>You must create the destination Amazon S3 bucket for your recommendations
/// export before you create the export job. Compute Optimizer does not create the S3 bucket
/// for you. After you create the S3 bucket, ensure that it has the required permission
/// policy to allow Compute Optimizer to write the export file to it. If you plan to specify
/// an object prefix when you create the export job, you must include the object prefix in
/// the policy that you add to the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/create-s3-bucket-policy-for-compute-optimizer.html">Amazon S3 Bucket Policy for Compute Optimizer</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
pub fn set_s3_destination_config(
mut self,
input: std::option::Option<crate::model::S3DestinationConfig>,
) -> Self {
self.s3_destination_config = input;
self
}
/// <p>The format of the export file.</p>
///
/// <p>The only export file format currently supported is <code>Csv</code>.</p>
pub fn file_format(mut self, input: crate::model::FileFormat) -> Self {
self.file_format = Some(input);
self
}
/// <p>The format of the export file.</p>
///
/// <p>The only export file format currently supported is <code>Csv</code>.</p>
pub fn set_file_format(
mut self,
input: std::option::Option<crate::model::FileFormat>,
) -> Self {
self.file_format = input;
self
}
/// <p>Indicates whether to include recommendations for resources in all member accounts of
/// the organization if your account is the management account of an organization.</p>
///
/// <p>The member accounts must also be opted in to Compute Optimizer, and trusted access for
/// Compute Optimizer must be enabled in the organization account. For more information,
/// see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/security-iam.html#trusted-service-access">Compute Optimizer and Amazon Web Services Organizations trusted access</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
///
/// <p>Recommendations for member accounts of the organization are not included in the export
/// file if this parameter is omitted.</p>
///
/// <p>This parameter cannot be specified together with the account IDs parameter. The
/// parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the account IDs parameter, is omitted.</p>
pub fn include_member_accounts(mut self, input: bool) -> Self {
self.include_member_accounts = Some(input);
self
}
/// <p>Indicates whether to include recommendations for resources in all member accounts of
/// the organization if your account is the management account of an organization.</p>
///
/// <p>The member accounts must also be opted in to Compute Optimizer, and trusted access for
/// Compute Optimizer must be enabled in the organization account. For more information,
/// see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/security-iam.html#trusted-service-access">Compute Optimizer and Amazon Web Services Organizations trusted access</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
///
/// <p>Recommendations for member accounts of the organization are not included in the export
/// file if this parameter is omitted.</p>
///
/// <p>This parameter cannot be specified together with the account IDs parameter. The
/// parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the account IDs parameter, is omitted.</p>
pub fn set_include_member_accounts(mut self, input: std::option::Option<bool>) -> Self {
self.include_member_accounts = input;
self
}
/// Consumes the builder and constructs a [`ExportLambdaFunctionRecommendationsInput`](crate::input::ExportLambdaFunctionRecommendationsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::ExportLambdaFunctionRecommendationsInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::ExportLambdaFunctionRecommendationsInput {
account_ids: self.account_ids,
filters: self.filters,
fields_to_export: self.fields_to_export,
s3_destination_config: self.s3_destination_config,
file_format: self.file_format,
include_member_accounts: self.include_member_accounts.unwrap_or_default(),
})
}
}
}
#[doc(hidden)]
pub type ExportLambdaFunctionRecommendationsInputOperationOutputAlias =
crate::operation::ExportLambdaFunctionRecommendations;
#[doc(hidden)]
pub type ExportLambdaFunctionRecommendationsInputOperationRetryAlias =
aws_http::AwsErrorRetryPolicy;
impl ExportLambdaFunctionRecommendationsInput {
/// Consumes the builder and constructs an Operation<[`ExportLambdaFunctionRecommendations`](crate::operation::ExportLambdaFunctionRecommendations)>
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::ExportLambdaFunctionRecommendations,
aws_http::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
fn uri_base(
_input: &crate::input::ExportLambdaFunctionRecommendationsInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::ExportLambdaFunctionRecommendationsInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
input: &crate::input::ExportLambdaFunctionRecommendationsInput,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
#[allow(unused_mut)]
let mut builder = update_http_builder(input, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("content-type"),
"application/x-amz-json-1.0",
);
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"ComputeOptimizerService.ExportLambdaFunctionRecommendations",
);
Ok(builder)
}
let properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
let request = request_builder_base(&self)?;
let body =
crate::operation_ser::serialize_operation_crate_operation_export_lambda_function_recommendations(&self)?
;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request = aws_smithy_http::operation::Request::from_parts(
request.map(aws_smithy_http::body::SdkBody::from),
properties,
);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::ExportLambdaFunctionRecommendations::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"ExportLambdaFunctionRecommendations",
"computeoptimizer",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
Ok(op)
}
fn assemble(
builder: http::request::Builder,
body: aws_smithy_http::body::SdkBody,
) -> http::request::Request<aws_smithy_http::body::SdkBody> {
let mut builder = builder;
if let Some(content_length) = body.content_length() {
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::CONTENT_LENGTH,
content_length,
);
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`ExportLambdaFunctionRecommendationsInput`](crate::input::ExportLambdaFunctionRecommendationsInput)
pub fn builder() -> crate::input::export_lambda_function_recommendations_input::Builder {
crate::input::export_lambda_function_recommendations_input::Builder::default()
}
}
/// See [`GetAutoScalingGroupRecommendationsInput`](crate::input::GetAutoScalingGroupRecommendationsInput)
pub mod get_auto_scaling_group_recommendations_input {
/// A builder for [`GetAutoScalingGroupRecommendationsInput`](crate::input::GetAutoScalingGroupRecommendationsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) account_ids: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) auto_scaling_group_arns: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) next_token: std::option::Option<std::string::String>,
pub(crate) max_results: std::option::Option<i32>,
pub(crate) filters: std::option::Option<std::vec::Vec<crate::model::Filter>>,
pub(crate) recommendation_preferences:
std::option::Option<crate::model::RecommendationPreferences>,
}
impl Builder {
/// Appends an item to `account_ids`.
///
/// To override the contents of this collection use [`set_account_ids`](Self::set_account_ids).
///
/// <p>The ID of the Amazon Web Services account for which to return Auto Scaling group
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return Auto Scaling group
/// recommendations.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub fn account_ids(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.account_ids.unwrap_or_default();
v.push(input.into());
self.account_ids = Some(v);
self
}
/// <p>The ID of the Amazon Web Services account for which to return Auto Scaling group
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return Auto Scaling group
/// recommendations.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub fn set_account_ids(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.account_ids = input;
self
}
/// Appends an item to `auto_scaling_group_arns`.
///
/// To override the contents of this collection use [`set_auto_scaling_group_arns`](Self::set_auto_scaling_group_arns).
///
/// <p>The Amazon Resource Name (ARN) of the Auto Scaling groups for which to return
/// recommendations.</p>
pub fn auto_scaling_group_arns(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.auto_scaling_group_arns.unwrap_or_default();
v.push(input.into());
self.auto_scaling_group_arns = Some(v);
self
}
/// <p>The Amazon Resource Name (ARN) of the Auto Scaling groups for which to return
/// recommendations.</p>
pub fn set_auto_scaling_group_arns(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.auto_scaling_group_arns = input;
self
}
/// <p>The token to advance to the next page of Auto Scaling group
/// recommendations.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
/// <p>The token to advance to the next page of Auto Scaling group
/// recommendations.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
/// <p>The maximum number of Auto Scaling group recommendations to return with a single
/// request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn max_results(mut self, input: i32) -> Self {
self.max_results = Some(input);
self
}
/// <p>The maximum number of Auto Scaling group recommendations to return with a single
/// request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.max_results = input;
self
}
/// Appends an item to `filters`.
///
/// To override the contents of this collection use [`set_filters`](Self::set_filters).
///
/// <p>An array of objects to specify a filter that returns a more specific list of Auto Scaling group recommendations.</p>
pub fn filters(mut self, input: impl Into<crate::model::Filter>) -> Self {
let mut v = self.filters.unwrap_or_default();
v.push(input.into());
self.filters = Some(v);
self
}
/// <p>An array of objects to specify a filter that returns a more specific list of Auto Scaling group recommendations.</p>
pub fn set_filters(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Filter>>,
) -> Self {
self.filters = input;
self
}
/// <p>An object to specify the preferences for the Auto Scaling group recommendations
/// to return in the response.</p>
pub fn recommendation_preferences(
mut self,
input: crate::model::RecommendationPreferences,
) -> Self {
self.recommendation_preferences = Some(input);
self
}
/// <p>An object to specify the preferences for the Auto Scaling group recommendations
/// to return in the response.</p>
pub fn set_recommendation_preferences(
mut self,
input: std::option::Option<crate::model::RecommendationPreferences>,
) -> Self {
self.recommendation_preferences = input;
self
}
/// Consumes the builder and constructs a [`GetAutoScalingGroupRecommendationsInput`](crate::input::GetAutoScalingGroupRecommendationsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::GetAutoScalingGroupRecommendationsInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::GetAutoScalingGroupRecommendationsInput {
account_ids: self.account_ids,
auto_scaling_group_arns: self.auto_scaling_group_arns,
next_token: self.next_token,
max_results: self.max_results,
filters: self.filters,
recommendation_preferences: self.recommendation_preferences,
})
}
}
}
#[doc(hidden)]
pub type GetAutoScalingGroupRecommendationsInputOperationOutputAlias =
crate::operation::GetAutoScalingGroupRecommendations;
#[doc(hidden)]
pub type GetAutoScalingGroupRecommendationsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl GetAutoScalingGroupRecommendationsInput {
/// Consumes the builder and constructs an Operation<[`GetAutoScalingGroupRecommendations`](crate::operation::GetAutoScalingGroupRecommendations)>
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::GetAutoScalingGroupRecommendations,
aws_http::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
fn uri_base(
_input: &crate::input::GetAutoScalingGroupRecommendationsInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::GetAutoScalingGroupRecommendationsInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
input: &crate::input::GetAutoScalingGroupRecommendationsInput,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
#[allow(unused_mut)]
let mut builder = update_http_builder(input, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("content-type"),
"application/x-amz-json-1.0",
);
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"ComputeOptimizerService.GetAutoScalingGroupRecommendations",
);
Ok(builder)
}
let properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
let request = request_builder_base(&self)?;
let body =
crate::operation_ser::serialize_operation_crate_operation_get_auto_scaling_group_recommendations(&self)?
;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request = aws_smithy_http::operation::Request::from_parts(
request.map(aws_smithy_http::body::SdkBody::from),
properties,
);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::GetAutoScalingGroupRecommendations::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"GetAutoScalingGroupRecommendations",
"computeoptimizer",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
Ok(op)
}
fn assemble(
builder: http::request::Builder,
body: aws_smithy_http::body::SdkBody,
) -> http::request::Request<aws_smithy_http::body::SdkBody> {
let mut builder = builder;
if let Some(content_length) = body.content_length() {
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::CONTENT_LENGTH,
content_length,
);
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`GetAutoScalingGroupRecommendationsInput`](crate::input::GetAutoScalingGroupRecommendationsInput)
pub fn builder() -> crate::input::get_auto_scaling_group_recommendations_input::Builder {
crate::input::get_auto_scaling_group_recommendations_input::Builder::default()
}
}
/// See [`GetEbsVolumeRecommendationsInput`](crate::input::GetEbsVolumeRecommendationsInput)
pub mod get_ebs_volume_recommendations_input {
/// A builder for [`GetEbsVolumeRecommendationsInput`](crate::input::GetEbsVolumeRecommendationsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) volume_arns: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) next_token: std::option::Option<std::string::String>,
pub(crate) max_results: std::option::Option<i32>,
pub(crate) filters: std::option::Option<std::vec::Vec<crate::model::EbsFilter>>,
pub(crate) account_ids: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
/// Appends an item to `volume_arns`.
///
/// To override the contents of this collection use [`set_volume_arns`](Self::set_volume_arns).
///
/// <p>The Amazon Resource Name (ARN) of the volumes for which to return
/// recommendations.</p>
pub fn volume_arns(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.volume_arns.unwrap_or_default();
v.push(input.into());
self.volume_arns = Some(v);
self
}
/// <p>The Amazon Resource Name (ARN) of the volumes for which to return
/// recommendations.</p>
pub fn set_volume_arns(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.volume_arns = input;
self
}
/// <p>The token to advance to the next page of volume recommendations.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
/// <p>The token to advance to the next page of volume recommendations.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
/// <p>The maximum number of volume recommendations to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn max_results(mut self, input: i32) -> Self {
self.max_results = Some(input);
self
}
/// <p>The maximum number of volume recommendations to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.max_results = input;
self
}
/// Appends an item to `filters`.
///
/// To override the contents of this collection use [`set_filters`](Self::set_filters).
///
/// <p>An array of objects to specify a filter that returns a more specific list of volume
/// recommendations.</p>
pub fn filters(mut self, input: impl Into<crate::model::EbsFilter>) -> Self {
let mut v = self.filters.unwrap_or_default();
v.push(input.into());
self.filters = Some(v);
self
}
/// <p>An array of objects to specify a filter that returns a more specific list of volume
/// recommendations.</p>
pub fn set_filters(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::EbsFilter>>,
) -> Self {
self.filters = input;
self
}
/// Appends an item to `account_ids`.
///
/// To override the contents of this collection use [`set_account_ids`](Self::set_account_ids).
///
/// <p>The ID of the Amazon Web Services account for which to return volume
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return volume recommendations.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub fn account_ids(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.account_ids.unwrap_or_default();
v.push(input.into());
self.account_ids = Some(v);
self
}
/// <p>The ID of the Amazon Web Services account for which to return volume
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return volume recommendations.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub fn set_account_ids(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.account_ids = input;
self
}
/// Consumes the builder and constructs a [`GetEbsVolumeRecommendationsInput`](crate::input::GetEbsVolumeRecommendationsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::GetEbsVolumeRecommendationsInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::GetEbsVolumeRecommendationsInput {
volume_arns: self.volume_arns,
next_token: self.next_token,
max_results: self.max_results,
filters: self.filters,
account_ids: self.account_ids,
})
}
}
}
#[doc(hidden)]
pub type GetEbsVolumeRecommendationsInputOperationOutputAlias =
crate::operation::GetEBSVolumeRecommendations;
#[doc(hidden)]
pub type GetEbsVolumeRecommendationsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl GetEbsVolumeRecommendationsInput {
/// Consumes the builder and constructs an Operation<[`GetEBSVolumeRecommendations`](crate::operation::GetEBSVolumeRecommendations)>
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::GetEBSVolumeRecommendations,
aws_http::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
fn uri_base(
_input: &crate::input::GetEbsVolumeRecommendationsInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::GetEbsVolumeRecommendationsInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
input: &crate::input::GetEbsVolumeRecommendationsInput,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
#[allow(unused_mut)]
let mut builder = update_http_builder(input, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("content-type"),
"application/x-amz-json-1.0",
);
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"ComputeOptimizerService.GetEBSVolumeRecommendations",
);
Ok(builder)
}
let properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
let request = request_builder_base(&self)?;
let body =
crate::operation_ser::serialize_operation_crate_operation_get_ebs_volume_recommendations(&self)?
;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request = aws_smithy_http::operation::Request::from_parts(
request.map(aws_smithy_http::body::SdkBody::from),
properties,
);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::GetEBSVolumeRecommendations::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"GetEBSVolumeRecommendations",
"computeoptimizer",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
Ok(op)
}
fn assemble(
builder: http::request::Builder,
body: aws_smithy_http::body::SdkBody,
) -> http::request::Request<aws_smithy_http::body::SdkBody> {
let mut builder = builder;
if let Some(content_length) = body.content_length() {
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::CONTENT_LENGTH,
content_length,
);
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`GetEbsVolumeRecommendationsInput`](crate::input::GetEbsVolumeRecommendationsInput)
pub fn builder() -> crate::input::get_ebs_volume_recommendations_input::Builder {
crate::input::get_ebs_volume_recommendations_input::Builder::default()
}
}
/// See [`GetEc2InstanceRecommendationsInput`](crate::input::GetEc2InstanceRecommendationsInput)
pub mod get_ec2_instance_recommendations_input {
/// A builder for [`GetEc2InstanceRecommendationsInput`](crate::input::GetEc2InstanceRecommendationsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) instance_arns: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) next_token: std::option::Option<std::string::String>,
pub(crate) max_results: std::option::Option<i32>,
pub(crate) filters: std::option::Option<std::vec::Vec<crate::model::Filter>>,
pub(crate) account_ids: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) recommendation_preferences:
std::option::Option<crate::model::RecommendationPreferences>,
}
impl Builder {
/// Appends an item to `instance_arns`.
///
/// To override the contents of this collection use [`set_instance_arns`](Self::set_instance_arns).
///
/// <p>The Amazon Resource Name (ARN) of the instances for which to return
/// recommendations.</p>
pub fn instance_arns(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.instance_arns.unwrap_or_default();
v.push(input.into());
self.instance_arns = Some(v);
self
}
/// <p>The Amazon Resource Name (ARN) of the instances for which to return
/// recommendations.</p>
pub fn set_instance_arns(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.instance_arns = input;
self
}
/// <p>The token to advance to the next page of instance recommendations.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
/// <p>The token to advance to the next page of instance recommendations.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
/// <p>The maximum number of instance recommendations to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn max_results(mut self, input: i32) -> Self {
self.max_results = Some(input);
self
}
/// <p>The maximum number of instance recommendations to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.max_results = input;
self
}
/// Appends an item to `filters`.
///
/// To override the contents of this collection use [`set_filters`](Self::set_filters).
///
/// <p>An array of objects to specify a filter that returns a more specific list of instance
/// recommendations.</p>
pub fn filters(mut self, input: impl Into<crate::model::Filter>) -> Self {
let mut v = self.filters.unwrap_or_default();
v.push(input.into());
self.filters = Some(v);
self
}
/// <p>An array of objects to specify a filter that returns a more specific list of instance
/// recommendations.</p>
pub fn set_filters(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Filter>>,
) -> Self {
self.filters = input;
self
}
/// Appends an item to `account_ids`.
///
/// To override the contents of this collection use [`set_account_ids`](Self::set_account_ids).
///
/// <p>The ID of the Amazon Web Services account for which to return instance
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return instance recommendations.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub fn account_ids(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.account_ids.unwrap_or_default();
v.push(input.into());
self.account_ids = Some(v);
self
}
/// <p>The ID of the Amazon Web Services account for which to return instance
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return instance recommendations.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub fn set_account_ids(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.account_ids = input;
self
}
/// <p>An object to specify the preferences for the Amazon EC2 instance
/// recommendations to return in the response.</p>
pub fn recommendation_preferences(
mut self,
input: crate::model::RecommendationPreferences,
) -> Self {
self.recommendation_preferences = Some(input);
self
}
/// <p>An object to specify the preferences for the Amazon EC2 instance
/// recommendations to return in the response.</p>
pub fn set_recommendation_preferences(
mut self,
input: std::option::Option<crate::model::RecommendationPreferences>,
) -> Self {
self.recommendation_preferences = input;
self
}
/// Consumes the builder and constructs a [`GetEc2InstanceRecommendationsInput`](crate::input::GetEc2InstanceRecommendationsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::GetEc2InstanceRecommendationsInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::GetEc2InstanceRecommendationsInput {
instance_arns: self.instance_arns,
next_token: self.next_token,
max_results: self.max_results,
filters: self.filters,
account_ids: self.account_ids,
recommendation_preferences: self.recommendation_preferences,
})
}
}
}
#[doc(hidden)]
pub type GetEc2InstanceRecommendationsInputOperationOutputAlias =
crate::operation::GetEC2InstanceRecommendations;
#[doc(hidden)]
pub type GetEc2InstanceRecommendationsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl GetEc2InstanceRecommendationsInput {
/// Consumes the builder and constructs an Operation<[`GetEC2InstanceRecommendations`](crate::operation::GetEC2InstanceRecommendations)>
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::GetEC2InstanceRecommendations,
aws_http::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
fn uri_base(
_input: &crate::input::GetEc2InstanceRecommendationsInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::GetEc2InstanceRecommendationsInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
input: &crate::input::GetEc2InstanceRecommendationsInput,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
#[allow(unused_mut)]
let mut builder = update_http_builder(input, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("content-type"),
"application/x-amz-json-1.0",
);
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"ComputeOptimizerService.GetEC2InstanceRecommendations",
);
Ok(builder)
}
let properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
let request = request_builder_base(&self)?;
let body =
crate::operation_ser::serialize_operation_crate_operation_get_ec2_instance_recommendations(&self)?
;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request = aws_smithy_http::operation::Request::from_parts(
request.map(aws_smithy_http::body::SdkBody::from),
properties,
);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::GetEC2InstanceRecommendations::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"GetEC2InstanceRecommendations",
"computeoptimizer",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
Ok(op)
}
fn assemble(
builder: http::request::Builder,
body: aws_smithy_http::body::SdkBody,
) -> http::request::Request<aws_smithy_http::body::SdkBody> {
let mut builder = builder;
if let Some(content_length) = body.content_length() {
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::CONTENT_LENGTH,
content_length,
);
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`GetEc2InstanceRecommendationsInput`](crate::input::GetEc2InstanceRecommendationsInput)
pub fn builder() -> crate::input::get_ec2_instance_recommendations_input::Builder {
crate::input::get_ec2_instance_recommendations_input::Builder::default()
}
}
/// See [`GetEc2RecommendationProjectedMetricsInput`](crate::input::GetEc2RecommendationProjectedMetricsInput)
pub mod get_ec2_recommendation_projected_metrics_input {
/// A builder for [`GetEc2RecommendationProjectedMetricsInput`](crate::input::GetEc2RecommendationProjectedMetricsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) instance_arn: std::option::Option<std::string::String>,
pub(crate) stat: std::option::Option<crate::model::MetricStatistic>,
pub(crate) period: std::option::Option<i32>,
pub(crate) start_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) end_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) recommendation_preferences:
std::option::Option<crate::model::RecommendationPreferences>,
}
impl Builder {
/// <p>The Amazon Resource Name (ARN) of the instances for which to return recommendation
/// projected metrics.</p>
pub fn instance_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.instance_arn = Some(input.into());
self
}
/// <p>The Amazon Resource Name (ARN) of the instances for which to return recommendation
/// projected metrics.</p>
pub fn set_instance_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.instance_arn = input;
self
}
/// <p>The statistic of the projected metrics.</p>
pub fn stat(mut self, input: crate::model::MetricStatistic) -> Self {
self.stat = Some(input);
self
}
/// <p>The statistic of the projected metrics.</p>
pub fn set_stat(
mut self,
input: std::option::Option<crate::model::MetricStatistic>,
) -> Self {
self.stat = input;
self
}
/// <p>The granularity, in seconds, of the projected metrics data points.</p>
pub fn period(mut self, input: i32) -> Self {
self.period = Some(input);
self
}
/// <p>The granularity, in seconds, of the projected metrics data points.</p>
pub fn set_period(mut self, input: std::option::Option<i32>) -> Self {
self.period = input;
self
}
/// <p>The timestamp of the first projected metrics data point to return.</p>
pub fn start_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.start_time = Some(input);
self
}
/// <p>The timestamp of the first projected metrics data point to return.</p>
pub fn set_start_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.start_time = input;
self
}
/// <p>The timestamp of the last projected metrics data point to return.</p>
pub fn end_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.end_time = Some(input);
self
}
/// <p>The timestamp of the last projected metrics data point to return.</p>
pub fn set_end_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.end_time = input;
self
}
/// <p>An object to specify the preferences for the Amazon EC2 recommendation
/// projected metrics to return in the response.</p>
pub fn recommendation_preferences(
mut self,
input: crate::model::RecommendationPreferences,
) -> Self {
self.recommendation_preferences = Some(input);
self
}
/// <p>An object to specify the preferences for the Amazon EC2 recommendation
/// projected metrics to return in the response.</p>
pub fn set_recommendation_preferences(
mut self,
input: std::option::Option<crate::model::RecommendationPreferences>,
) -> Self {
self.recommendation_preferences = input;
self
}
/// Consumes the builder and constructs a [`GetEc2RecommendationProjectedMetricsInput`](crate::input::GetEc2RecommendationProjectedMetricsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::GetEc2RecommendationProjectedMetricsInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::GetEc2RecommendationProjectedMetricsInput {
instance_arn: self.instance_arn,
stat: self.stat,
period: self.period.unwrap_or_default(),
start_time: self.start_time,
end_time: self.end_time,
recommendation_preferences: self.recommendation_preferences,
})
}
}
}
#[doc(hidden)]
pub type GetEc2RecommendationProjectedMetricsInputOperationOutputAlias =
crate::operation::GetEC2RecommendationProjectedMetrics;
#[doc(hidden)]
pub type GetEc2RecommendationProjectedMetricsInputOperationRetryAlias =
aws_http::AwsErrorRetryPolicy;
impl GetEc2RecommendationProjectedMetricsInput {
/// Consumes the builder and constructs an Operation<[`GetEC2RecommendationProjectedMetrics`](crate::operation::GetEC2RecommendationProjectedMetrics)>
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::GetEC2RecommendationProjectedMetrics,
aws_http::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
fn uri_base(
_input: &crate::input::GetEc2RecommendationProjectedMetricsInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::GetEc2RecommendationProjectedMetricsInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
input: &crate::input::GetEc2RecommendationProjectedMetricsInput,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
#[allow(unused_mut)]
let mut builder = update_http_builder(input, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("content-type"),
"application/x-amz-json-1.0",
);
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"ComputeOptimizerService.GetEC2RecommendationProjectedMetrics",
);
Ok(builder)
}
let properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
let request = request_builder_base(&self)?;
let body =
crate::operation_ser::serialize_operation_crate_operation_get_ec2_recommendation_projected_metrics(&self)?
;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request = aws_smithy_http::operation::Request::from_parts(
request.map(aws_smithy_http::body::SdkBody::from),
properties,
);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::GetEC2RecommendationProjectedMetrics::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"GetEC2RecommendationProjectedMetrics",
"computeoptimizer",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
Ok(op)
}
fn assemble(
builder: http::request::Builder,
body: aws_smithy_http::body::SdkBody,
) -> http::request::Request<aws_smithy_http::body::SdkBody> {
let mut builder = builder;
if let Some(content_length) = body.content_length() {
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::CONTENT_LENGTH,
content_length,
);
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`GetEc2RecommendationProjectedMetricsInput`](crate::input::GetEc2RecommendationProjectedMetricsInput)
pub fn builder() -> crate::input::get_ec2_recommendation_projected_metrics_input::Builder {
crate::input::get_ec2_recommendation_projected_metrics_input::Builder::default()
}
}
/// See [`GetEnrollmentStatusInput`](crate::input::GetEnrollmentStatusInput)
pub mod get_enrollment_status_input {
/// A builder for [`GetEnrollmentStatusInput`](crate::input::GetEnrollmentStatusInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {}
impl Builder {
/// Consumes the builder and constructs a [`GetEnrollmentStatusInput`](crate::input::GetEnrollmentStatusInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::GetEnrollmentStatusInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::GetEnrollmentStatusInput {})
}
}
}
#[doc(hidden)]
pub type GetEnrollmentStatusInputOperationOutputAlias = crate::operation::GetEnrollmentStatus;
#[doc(hidden)]
pub type GetEnrollmentStatusInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl GetEnrollmentStatusInput {
/// Consumes the builder and constructs an Operation<[`GetEnrollmentStatus`](crate::operation::GetEnrollmentStatus)>
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::GetEnrollmentStatus,
aws_http::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
fn uri_base(
_input: &crate::input::GetEnrollmentStatusInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::GetEnrollmentStatusInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
input: &crate::input::GetEnrollmentStatusInput,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
#[allow(unused_mut)]
let mut builder = update_http_builder(input, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("content-type"),
"application/x-amz-json-1.0",
);
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"ComputeOptimizerService.GetEnrollmentStatus",
);
Ok(builder)
}
let properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
let request = request_builder_base(&self)?;
let body =
crate::operation_ser::serialize_operation_crate_operation_get_enrollment_status(&self)?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request = aws_smithy_http::operation::Request::from_parts(
request.map(aws_smithy_http::body::SdkBody::from),
properties,
);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::GetEnrollmentStatus::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"GetEnrollmentStatus",
"computeoptimizer",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
Ok(op)
}
fn assemble(
builder: http::request::Builder,
body: aws_smithy_http::body::SdkBody,
) -> http::request::Request<aws_smithy_http::body::SdkBody> {
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`GetEnrollmentStatusInput`](crate::input::GetEnrollmentStatusInput)
pub fn builder() -> crate::input::get_enrollment_status_input::Builder {
crate::input::get_enrollment_status_input::Builder::default()
}
}
/// See [`GetEnrollmentStatusesForOrganizationInput`](crate::input::GetEnrollmentStatusesForOrganizationInput)
pub mod get_enrollment_statuses_for_organization_input {
/// A builder for [`GetEnrollmentStatusesForOrganizationInput`](crate::input::GetEnrollmentStatusesForOrganizationInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) filters: std::option::Option<std::vec::Vec<crate::model::EnrollmentFilter>>,
pub(crate) next_token: std::option::Option<std::string::String>,
pub(crate) max_results: std::option::Option<i32>,
}
impl Builder {
/// Appends an item to `filters`.
///
/// To override the contents of this collection use [`set_filters`](Self::set_filters).
///
/// <p>An array of objects to specify a filter that returns a more specific list of account
/// enrollment statuses.</p>
pub fn filters(mut self, input: impl Into<crate::model::EnrollmentFilter>) -> Self {
let mut v = self.filters.unwrap_or_default();
v.push(input.into());
self.filters = Some(v);
self
}
/// <p>An array of objects to specify a filter that returns a more specific list of account
/// enrollment statuses.</p>
pub fn set_filters(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::EnrollmentFilter>>,
) -> Self {
self.filters = input;
self
}
/// <p>The token to advance to the next page of account enrollment statuses.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
/// <p>The token to advance to the next page of account enrollment statuses.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
/// <p>The maximum number of account enrollment statuses to return with a single request. You
/// can specify up to 100 statuses to return with each request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn max_results(mut self, input: i32) -> Self {
self.max_results = Some(input);
self
}
/// <p>The maximum number of account enrollment statuses to return with a single request. You
/// can specify up to 100 statuses to return with each request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.max_results = input;
self
}
/// Consumes the builder and constructs a [`GetEnrollmentStatusesForOrganizationInput`](crate::input::GetEnrollmentStatusesForOrganizationInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::GetEnrollmentStatusesForOrganizationInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::GetEnrollmentStatusesForOrganizationInput {
filters: self.filters,
next_token: self.next_token,
max_results: self.max_results,
})
}
}
}
#[doc(hidden)]
pub type GetEnrollmentStatusesForOrganizationInputOperationOutputAlias =
crate::operation::GetEnrollmentStatusesForOrganization;
#[doc(hidden)]
pub type GetEnrollmentStatusesForOrganizationInputOperationRetryAlias =
aws_http::AwsErrorRetryPolicy;
impl GetEnrollmentStatusesForOrganizationInput {
/// Consumes the builder and constructs an Operation<[`GetEnrollmentStatusesForOrganization`](crate::operation::GetEnrollmentStatusesForOrganization)>
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::GetEnrollmentStatusesForOrganization,
aws_http::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
fn uri_base(
_input: &crate::input::GetEnrollmentStatusesForOrganizationInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::GetEnrollmentStatusesForOrganizationInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
input: &crate::input::GetEnrollmentStatusesForOrganizationInput,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
#[allow(unused_mut)]
let mut builder = update_http_builder(input, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("content-type"),
"application/x-amz-json-1.0",
);
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"ComputeOptimizerService.GetEnrollmentStatusesForOrganization",
);
Ok(builder)
}
let properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
let request = request_builder_base(&self)?;
let body =
crate::operation_ser::serialize_operation_crate_operation_get_enrollment_statuses_for_organization(&self)?
;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request = aws_smithy_http::operation::Request::from_parts(
request.map(aws_smithy_http::body::SdkBody::from),
properties,
);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::GetEnrollmentStatusesForOrganization::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"GetEnrollmentStatusesForOrganization",
"computeoptimizer",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
Ok(op)
}
fn assemble(
builder: http::request::Builder,
body: aws_smithy_http::body::SdkBody,
) -> http::request::Request<aws_smithy_http::body::SdkBody> {
let mut builder = builder;
if let Some(content_length) = body.content_length() {
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::CONTENT_LENGTH,
content_length,
);
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`GetEnrollmentStatusesForOrganizationInput`](crate::input::GetEnrollmentStatusesForOrganizationInput)
pub fn builder() -> crate::input::get_enrollment_statuses_for_organization_input::Builder {
crate::input::get_enrollment_statuses_for_organization_input::Builder::default()
}
}
/// See [`GetLambdaFunctionRecommendationsInput`](crate::input::GetLambdaFunctionRecommendationsInput)
pub mod get_lambda_function_recommendations_input {
/// A builder for [`GetLambdaFunctionRecommendationsInput`](crate::input::GetLambdaFunctionRecommendationsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) function_arns: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) account_ids: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) filters:
std::option::Option<std::vec::Vec<crate::model::LambdaFunctionRecommendationFilter>>,
pub(crate) next_token: std::option::Option<std::string::String>,
pub(crate) max_results: std::option::Option<i32>,
}
impl Builder {
/// Appends an item to `function_arns`.
///
/// To override the contents of this collection use [`set_function_arns`](Self::set_function_arns).
///
/// <p>The Amazon Resource Name (ARN) of the functions for which to return
/// recommendations.</p>
///
/// <p>You can specify a qualified or unqualified ARN. If you specify an unqualified ARN
/// without a function version suffix, Compute Optimizer will return recommendations for the
/// latest (<code>$LATEST</code>) version of the function. If you specify a qualified ARN
/// with a version suffix, Compute Optimizer will return recommendations for the specified
/// function version. For more information about using function versions, see <a href="https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html#versioning-versions-using">Using
/// versions</a> in the <i>Lambda Developer
/// Guide</i>.</p>
pub fn function_arns(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.function_arns.unwrap_or_default();
v.push(input.into());
self.function_arns = Some(v);
self
}
/// <p>The Amazon Resource Name (ARN) of the functions for which to return
/// recommendations.</p>
///
/// <p>You can specify a qualified or unqualified ARN. If you specify an unqualified ARN
/// without a function version suffix, Compute Optimizer will return recommendations for the
/// latest (<code>$LATEST</code>) version of the function. If you specify a qualified ARN
/// with a version suffix, Compute Optimizer will return recommendations for the specified
/// function version. For more information about using function versions, see <a href="https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html#versioning-versions-using">Using
/// versions</a> in the <i>Lambda Developer
/// Guide</i>.</p>
pub fn set_function_arns(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.function_arns = input;
self
}
/// Appends an item to `account_ids`.
///
/// To override the contents of this collection use [`set_account_ids`](Self::set_account_ids).
///
/// <p>The ID of the Amazon Web Services account for which to return function
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return function recommendations.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub fn account_ids(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.account_ids.unwrap_or_default();
v.push(input.into());
self.account_ids = Some(v);
self
}
/// <p>The ID of the Amazon Web Services account for which to return function
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return function recommendations.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub fn set_account_ids(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.account_ids = input;
self
}
/// Appends an item to `filters`.
///
/// To override the contents of this collection use [`set_filters`](Self::set_filters).
///
/// <p>An array of objects to specify a filter that returns a more specific list of function
/// recommendations.</p>
pub fn filters(
mut self,
input: impl Into<crate::model::LambdaFunctionRecommendationFilter>,
) -> Self {
let mut v = self.filters.unwrap_or_default();
v.push(input.into());
self.filters = Some(v);
self
}
/// <p>An array of objects to specify a filter that returns a more specific list of function
/// recommendations.</p>
pub fn set_filters(
mut self,
input: std::option::Option<
std::vec::Vec<crate::model::LambdaFunctionRecommendationFilter>,
>,
) -> Self {
self.filters = input;
self
}
/// <p>The token to advance to the next page of function recommendations.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
/// <p>The token to advance to the next page of function recommendations.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
/// <p>The maximum number of function recommendations to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn max_results(mut self, input: i32) -> Self {
self.max_results = Some(input);
self
}
/// <p>The maximum number of function recommendations to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.max_results = input;
self
}
/// Consumes the builder and constructs a [`GetLambdaFunctionRecommendationsInput`](crate::input::GetLambdaFunctionRecommendationsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::GetLambdaFunctionRecommendationsInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::GetLambdaFunctionRecommendationsInput {
function_arns: self.function_arns,
account_ids: self.account_ids,
filters: self.filters,
next_token: self.next_token,
max_results: self.max_results,
})
}
}
}
#[doc(hidden)]
pub type GetLambdaFunctionRecommendationsInputOperationOutputAlias =
crate::operation::GetLambdaFunctionRecommendations;
#[doc(hidden)]
pub type GetLambdaFunctionRecommendationsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl GetLambdaFunctionRecommendationsInput {
/// Consumes the builder and constructs an Operation<[`GetLambdaFunctionRecommendations`](crate::operation::GetLambdaFunctionRecommendations)>
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::GetLambdaFunctionRecommendations,
aws_http::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
fn uri_base(
_input: &crate::input::GetLambdaFunctionRecommendationsInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::GetLambdaFunctionRecommendationsInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
input: &crate::input::GetLambdaFunctionRecommendationsInput,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
#[allow(unused_mut)]
let mut builder = update_http_builder(input, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("content-type"),
"application/x-amz-json-1.0",
);
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"ComputeOptimizerService.GetLambdaFunctionRecommendations",
);
Ok(builder)
}
let properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
let request = request_builder_base(&self)?;
let body =
crate::operation_ser::serialize_operation_crate_operation_get_lambda_function_recommendations(&self)?
;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request = aws_smithy_http::operation::Request::from_parts(
request.map(aws_smithy_http::body::SdkBody::from),
properties,
);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::GetLambdaFunctionRecommendations::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"GetLambdaFunctionRecommendations",
"computeoptimizer",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
Ok(op)
}
fn assemble(
builder: http::request::Builder,
body: aws_smithy_http::body::SdkBody,
) -> http::request::Request<aws_smithy_http::body::SdkBody> {
let mut builder = builder;
if let Some(content_length) = body.content_length() {
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::CONTENT_LENGTH,
content_length,
);
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`GetLambdaFunctionRecommendationsInput`](crate::input::GetLambdaFunctionRecommendationsInput)
pub fn builder() -> crate::input::get_lambda_function_recommendations_input::Builder {
crate::input::get_lambda_function_recommendations_input::Builder::default()
}
}
/// See [`GetRecommendationSummariesInput`](crate::input::GetRecommendationSummariesInput)
pub mod get_recommendation_summaries_input {
/// A builder for [`GetRecommendationSummariesInput`](crate::input::GetRecommendationSummariesInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) account_ids: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) next_token: std::option::Option<std::string::String>,
pub(crate) max_results: std::option::Option<i32>,
}
impl Builder {
/// Appends an item to `account_ids`.
///
/// To override the contents of this collection use [`set_account_ids`](Self::set_account_ids).
///
/// <p>The ID of the Amazon Web Services account for which to return recommendation
/// summaries.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return recommendation summaries.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub fn account_ids(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.account_ids.unwrap_or_default();
v.push(input.into());
self.account_ids = Some(v);
self
}
/// <p>The ID of the Amazon Web Services account for which to return recommendation
/// summaries.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return recommendation summaries.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub fn set_account_ids(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.account_ids = input;
self
}
/// <p>The token to advance to the next page of recommendation summaries.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
/// <p>The token to advance to the next page of recommendation summaries.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
/// <p>The maximum number of recommendation summaries to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn max_results(mut self, input: i32) -> Self {
self.max_results = Some(input);
self
}
/// <p>The maximum number of recommendation summaries to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.max_results = input;
self
}
/// Consumes the builder and constructs a [`GetRecommendationSummariesInput`](crate::input::GetRecommendationSummariesInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::GetRecommendationSummariesInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::GetRecommendationSummariesInput {
account_ids: self.account_ids,
next_token: self.next_token,
max_results: self.max_results,
})
}
}
}
#[doc(hidden)]
pub type GetRecommendationSummariesInputOperationOutputAlias =
crate::operation::GetRecommendationSummaries;
#[doc(hidden)]
pub type GetRecommendationSummariesInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl GetRecommendationSummariesInput {
/// Consumes the builder and constructs an Operation<[`GetRecommendationSummaries`](crate::operation::GetRecommendationSummaries)>
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::GetRecommendationSummaries,
aws_http::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
fn uri_base(
_input: &crate::input::GetRecommendationSummariesInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::GetRecommendationSummariesInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
input: &crate::input::GetRecommendationSummariesInput,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
#[allow(unused_mut)]
let mut builder = update_http_builder(input, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("content-type"),
"application/x-amz-json-1.0",
);
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"ComputeOptimizerService.GetRecommendationSummaries",
);
Ok(builder)
}
let properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
let request = request_builder_base(&self)?;
let body =
crate::operation_ser::serialize_operation_crate_operation_get_recommendation_summaries(
&self,
)?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request = aws_smithy_http::operation::Request::from_parts(
request.map(aws_smithy_http::body::SdkBody::from),
properties,
);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::GetRecommendationSummaries::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"GetRecommendationSummaries",
"computeoptimizer",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
Ok(op)
}
fn assemble(
builder: http::request::Builder,
body: aws_smithy_http::body::SdkBody,
) -> http::request::Request<aws_smithy_http::body::SdkBody> {
let mut builder = builder;
if let Some(content_length) = body.content_length() {
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::CONTENT_LENGTH,
content_length,
);
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`GetRecommendationSummariesInput`](crate::input::GetRecommendationSummariesInput)
pub fn builder() -> crate::input::get_recommendation_summaries_input::Builder {
crate::input::get_recommendation_summaries_input::Builder::default()
}
}
/// See [`UpdateEnrollmentStatusInput`](crate::input::UpdateEnrollmentStatusInput)
pub mod update_enrollment_status_input {
/// A builder for [`UpdateEnrollmentStatusInput`](crate::input::UpdateEnrollmentStatusInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) status: std::option::Option<crate::model::Status>,
pub(crate) include_member_accounts: std::option::Option<bool>,
}
impl Builder {
/// <p>The new enrollment status of the account.</p>
///
/// <p>The following status options are available:</p>
///
/// <ul>
/// <li>
/// <p>
/// <code>Active</code> - Opts in your account to the Compute Optimizer service.
/// Compute Optimizer begins analyzing the configuration and utilization metrics
/// of your Amazon Web Services resources after you opt in. For more information, see
/// <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/metrics.html">Metrics analyzed by Compute Optimizer</a> in the <i>Compute Optimizer User Guide</i>.</p>
/// </li>
/// <li>
/// <p>
/// <code>Inactive</code> - Opts out your account from the Compute Optimizer
/// service. Your account's recommendations and related metrics data will be deleted
/// from Compute Optimizer after you opt out.</p>
/// </li>
/// </ul>
///
/// <note>
/// <p>The <code>Pending</code> and <code>Failed</code> options cannot be used to update
/// the enrollment status of an account. They are returned in the response of a request
/// to update the enrollment status of an account.</p>
/// </note>
pub fn status(mut self, input: crate::model::Status) -> Self {
self.status = Some(input);
self
}
/// <p>The new enrollment status of the account.</p>
///
/// <p>The following status options are available:</p>
///
/// <ul>
/// <li>
/// <p>
/// <code>Active</code> - Opts in your account to the Compute Optimizer service.
/// Compute Optimizer begins analyzing the configuration and utilization metrics
/// of your Amazon Web Services resources after you opt in. For more information, see
/// <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/metrics.html">Metrics analyzed by Compute Optimizer</a> in the <i>Compute Optimizer User Guide</i>.</p>
/// </li>
/// <li>
/// <p>
/// <code>Inactive</code> - Opts out your account from the Compute Optimizer
/// service. Your account's recommendations and related metrics data will be deleted
/// from Compute Optimizer after you opt out.</p>
/// </li>
/// </ul>
///
/// <note>
/// <p>The <code>Pending</code> and <code>Failed</code> options cannot be used to update
/// the enrollment status of an account. They are returned in the response of a request
/// to update the enrollment status of an account.</p>
/// </note>
pub fn set_status(mut self, input: std::option::Option<crate::model::Status>) -> Self {
self.status = input;
self
}
/// <p>Indicates whether to enroll member accounts of the organization if the account is the
/// management account of an organization.</p>
pub fn include_member_accounts(mut self, input: bool) -> Self {
self.include_member_accounts = Some(input);
self
}
/// <p>Indicates whether to enroll member accounts of the organization if the account is the
/// management account of an organization.</p>
pub fn set_include_member_accounts(mut self, input: std::option::Option<bool>) -> Self {
self.include_member_accounts = input;
self
}
/// Consumes the builder and constructs a [`UpdateEnrollmentStatusInput`](crate::input::UpdateEnrollmentStatusInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::UpdateEnrollmentStatusInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::UpdateEnrollmentStatusInput {
status: self.status,
include_member_accounts: self.include_member_accounts.unwrap_or_default(),
})
}
}
}
#[doc(hidden)]
pub type UpdateEnrollmentStatusInputOperationOutputAlias = crate::operation::UpdateEnrollmentStatus;
#[doc(hidden)]
pub type UpdateEnrollmentStatusInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl UpdateEnrollmentStatusInput {
/// Consumes the builder and constructs an Operation<[`UpdateEnrollmentStatus`](crate::operation::UpdateEnrollmentStatus)>
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::UpdateEnrollmentStatus,
aws_http::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
fn uri_base(
_input: &crate::input::UpdateEnrollmentStatusInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::UpdateEnrollmentStatusInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
input: &crate::input::UpdateEnrollmentStatusInput,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
#[allow(unused_mut)]
let mut builder = update_http_builder(input, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("content-type"),
"application/x-amz-json-1.0",
);
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"ComputeOptimizerService.UpdateEnrollmentStatus",
);
Ok(builder)
}
let properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
let request = request_builder_base(&self)?;
let body =
crate::operation_ser::serialize_operation_crate_operation_update_enrollment_status(
&self,
)?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request = aws_smithy_http::operation::Request::from_parts(
request.map(aws_smithy_http::body::SdkBody::from),
properties,
);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::UpdateEnrollmentStatus::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"UpdateEnrollmentStatus",
"computeoptimizer",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
Ok(op)
}
fn assemble(
builder: http::request::Builder,
body: aws_smithy_http::body::SdkBody,
) -> http::request::Request<aws_smithy_http::body::SdkBody> {
let mut builder = builder;
if let Some(content_length) = body.content_length() {
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::CONTENT_LENGTH,
content_length,
);
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`UpdateEnrollmentStatusInput`](crate::input::UpdateEnrollmentStatusInput)
pub fn builder() -> crate::input::update_enrollment_status_input::Builder {
crate::input::update_enrollment_status_input::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UpdateEnrollmentStatusInput {
/// <p>The new enrollment status of the account.</p>
///
/// <p>The following status options are available:</p>
///
/// <ul>
/// <li>
/// <p>
/// <code>Active</code> - Opts in your account to the Compute Optimizer service.
/// Compute Optimizer begins analyzing the configuration and utilization metrics
/// of your Amazon Web Services resources after you opt in. For more information, see
/// <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/metrics.html">Metrics analyzed by Compute Optimizer</a> in the <i>Compute Optimizer User Guide</i>.</p>
/// </li>
/// <li>
/// <p>
/// <code>Inactive</code> - Opts out your account from the Compute Optimizer
/// service. Your account's recommendations and related metrics data will be deleted
/// from Compute Optimizer after you opt out.</p>
/// </li>
/// </ul>
///
/// <note>
/// <p>The <code>Pending</code> and <code>Failed</code> options cannot be used to update
/// the enrollment status of an account. They are returned in the response of a request
/// to update the enrollment status of an account.</p>
/// </note>
pub status: std::option::Option<crate::model::Status>,
/// <p>Indicates whether to enroll member accounts of the organization if the account is the
/// management account of an organization.</p>
pub include_member_accounts: bool,
}
impl UpdateEnrollmentStatusInput {
/// <p>The new enrollment status of the account.</p>
///
/// <p>The following status options are available:</p>
///
/// <ul>
/// <li>
/// <p>
/// <code>Active</code> - Opts in your account to the Compute Optimizer service.
/// Compute Optimizer begins analyzing the configuration and utilization metrics
/// of your Amazon Web Services resources after you opt in. For more information, see
/// <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/metrics.html">Metrics analyzed by Compute Optimizer</a> in the <i>Compute Optimizer User Guide</i>.</p>
/// </li>
/// <li>
/// <p>
/// <code>Inactive</code> - Opts out your account from the Compute Optimizer
/// service. Your account's recommendations and related metrics data will be deleted
/// from Compute Optimizer after you opt out.</p>
/// </li>
/// </ul>
///
/// <note>
/// <p>The <code>Pending</code> and <code>Failed</code> options cannot be used to update
/// the enrollment status of an account. They are returned in the response of a request
/// to update the enrollment status of an account.</p>
/// </note>
pub fn status(&self) -> std::option::Option<&crate::model::Status> {
self.status.as_ref()
}
/// <p>Indicates whether to enroll member accounts of the organization if the account is the
/// management account of an organization.</p>
pub fn include_member_accounts(&self) -> bool {
self.include_member_accounts
}
}
impl std::fmt::Debug for UpdateEnrollmentStatusInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("UpdateEnrollmentStatusInput");
formatter.field("status", &self.status);
formatter.field("include_member_accounts", &self.include_member_accounts);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetRecommendationSummariesInput {
/// <p>The ID of the Amazon Web Services account for which to return recommendation
/// summaries.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return recommendation summaries.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub account_ids: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The token to advance to the next page of recommendation summaries.</p>
pub next_token: std::option::Option<std::string::String>,
/// <p>The maximum number of recommendation summaries to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub max_results: std::option::Option<i32>,
}
impl GetRecommendationSummariesInput {
/// <p>The ID of the Amazon Web Services account for which to return recommendation
/// summaries.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return recommendation summaries.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub fn account_ids(&self) -> std::option::Option<&[std::string::String]> {
self.account_ids.as_deref()
}
/// <p>The token to advance to the next page of recommendation summaries.</p>
pub fn next_token(&self) -> std::option::Option<&str> {
self.next_token.as_deref()
}
/// <p>The maximum number of recommendation summaries to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn max_results(&self) -> std::option::Option<i32> {
self.max_results
}
}
impl std::fmt::Debug for GetRecommendationSummariesInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetRecommendationSummariesInput");
formatter.field("account_ids", &self.account_ids);
formatter.field("next_token", &self.next_token);
formatter.field("max_results", &self.max_results);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetLambdaFunctionRecommendationsInput {
/// <p>The Amazon Resource Name (ARN) of the functions for which to return
/// recommendations.</p>
///
/// <p>You can specify a qualified or unqualified ARN. If you specify an unqualified ARN
/// without a function version suffix, Compute Optimizer will return recommendations for the
/// latest (<code>$LATEST</code>) version of the function. If you specify a qualified ARN
/// with a version suffix, Compute Optimizer will return recommendations for the specified
/// function version. For more information about using function versions, see <a href="https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html#versioning-versions-using">Using
/// versions</a> in the <i>Lambda Developer
/// Guide</i>.</p>
pub function_arns: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The ID of the Amazon Web Services account for which to return function
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return function recommendations.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub account_ids: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>An array of objects to specify a filter that returns a more specific list of function
/// recommendations.</p>
pub filters:
std::option::Option<std::vec::Vec<crate::model::LambdaFunctionRecommendationFilter>>,
/// <p>The token to advance to the next page of function recommendations.</p>
pub next_token: std::option::Option<std::string::String>,
/// <p>The maximum number of function recommendations to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub max_results: std::option::Option<i32>,
}
impl GetLambdaFunctionRecommendationsInput {
/// <p>The Amazon Resource Name (ARN) of the functions for which to return
/// recommendations.</p>
///
/// <p>You can specify a qualified or unqualified ARN. If you specify an unqualified ARN
/// without a function version suffix, Compute Optimizer will return recommendations for the
/// latest (<code>$LATEST</code>) version of the function. If you specify a qualified ARN
/// with a version suffix, Compute Optimizer will return recommendations for the specified
/// function version. For more information about using function versions, see <a href="https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html#versioning-versions-using">Using
/// versions</a> in the <i>Lambda Developer
/// Guide</i>.</p>
pub fn function_arns(&self) -> std::option::Option<&[std::string::String]> {
self.function_arns.as_deref()
}
/// <p>The ID of the Amazon Web Services account for which to return function
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return function recommendations.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub fn account_ids(&self) -> std::option::Option<&[std::string::String]> {
self.account_ids.as_deref()
}
/// <p>An array of objects to specify a filter that returns a more specific list of function
/// recommendations.</p>
pub fn filters(
&self,
) -> std::option::Option<&[crate::model::LambdaFunctionRecommendationFilter]> {
self.filters.as_deref()
}
/// <p>The token to advance to the next page of function recommendations.</p>
pub fn next_token(&self) -> std::option::Option<&str> {
self.next_token.as_deref()
}
/// <p>The maximum number of function recommendations to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn max_results(&self) -> std::option::Option<i32> {
self.max_results
}
}
impl std::fmt::Debug for GetLambdaFunctionRecommendationsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetLambdaFunctionRecommendationsInput");
formatter.field("function_arns", &self.function_arns);
formatter.field("account_ids", &self.account_ids);
formatter.field("filters", &self.filters);
formatter.field("next_token", &self.next_token);
formatter.field("max_results", &self.max_results);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetEnrollmentStatusesForOrganizationInput {
/// <p>An array of objects to specify a filter that returns a more specific list of account
/// enrollment statuses.</p>
pub filters: std::option::Option<std::vec::Vec<crate::model::EnrollmentFilter>>,
/// <p>The token to advance to the next page of account enrollment statuses.</p>
pub next_token: std::option::Option<std::string::String>,
/// <p>The maximum number of account enrollment statuses to return with a single request. You
/// can specify up to 100 statuses to return with each request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub max_results: std::option::Option<i32>,
}
impl GetEnrollmentStatusesForOrganizationInput {
/// <p>An array of objects to specify a filter that returns a more specific list of account
/// enrollment statuses.</p>
pub fn filters(&self) -> std::option::Option<&[crate::model::EnrollmentFilter]> {
self.filters.as_deref()
}
/// <p>The token to advance to the next page of account enrollment statuses.</p>
pub fn next_token(&self) -> std::option::Option<&str> {
self.next_token.as_deref()
}
/// <p>The maximum number of account enrollment statuses to return with a single request. You
/// can specify up to 100 statuses to return with each request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn max_results(&self) -> std::option::Option<i32> {
self.max_results
}
}
impl std::fmt::Debug for GetEnrollmentStatusesForOrganizationInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetEnrollmentStatusesForOrganizationInput");
formatter.field("filters", &self.filters);
formatter.field("next_token", &self.next_token);
formatter.field("max_results", &self.max_results);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetEnrollmentStatusInput {}
impl std::fmt::Debug for GetEnrollmentStatusInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetEnrollmentStatusInput");
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetEc2RecommendationProjectedMetricsInput {
/// <p>The Amazon Resource Name (ARN) of the instances for which to return recommendation
/// projected metrics.</p>
pub instance_arn: std::option::Option<std::string::String>,
/// <p>The statistic of the projected metrics.</p>
pub stat: std::option::Option<crate::model::MetricStatistic>,
/// <p>The granularity, in seconds, of the projected metrics data points.</p>
pub period: i32,
/// <p>The timestamp of the first projected metrics data point to return.</p>
pub start_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The timestamp of the last projected metrics data point to return.</p>
pub end_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>An object to specify the preferences for the Amazon EC2 recommendation
/// projected metrics to return in the response.</p>
pub recommendation_preferences: std::option::Option<crate::model::RecommendationPreferences>,
}
impl GetEc2RecommendationProjectedMetricsInput {
/// <p>The Amazon Resource Name (ARN) of the instances for which to return recommendation
/// projected metrics.</p>
pub fn instance_arn(&self) -> std::option::Option<&str> {
self.instance_arn.as_deref()
}
/// <p>The statistic of the projected metrics.</p>
pub fn stat(&self) -> std::option::Option<&crate::model::MetricStatistic> {
self.stat.as_ref()
}
/// <p>The granularity, in seconds, of the projected metrics data points.</p>
pub fn period(&self) -> i32 {
self.period
}
/// <p>The timestamp of the first projected metrics data point to return.</p>
pub fn start_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.start_time.as_ref()
}
/// <p>The timestamp of the last projected metrics data point to return.</p>
pub fn end_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.end_time.as_ref()
}
/// <p>An object to specify the preferences for the Amazon EC2 recommendation
/// projected metrics to return in the response.</p>
pub fn recommendation_preferences(
&self,
) -> std::option::Option<&crate::model::RecommendationPreferences> {
self.recommendation_preferences.as_ref()
}
}
impl std::fmt::Debug for GetEc2RecommendationProjectedMetricsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetEc2RecommendationProjectedMetricsInput");
formatter.field("instance_arn", &self.instance_arn);
formatter.field("stat", &self.stat);
formatter.field("period", &self.period);
formatter.field("start_time", &self.start_time);
formatter.field("end_time", &self.end_time);
formatter.field(
"recommendation_preferences",
&self.recommendation_preferences,
);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetEc2InstanceRecommendationsInput {
/// <p>The Amazon Resource Name (ARN) of the instances for which to return
/// recommendations.</p>
pub instance_arns: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The token to advance to the next page of instance recommendations.</p>
pub next_token: std::option::Option<std::string::String>,
/// <p>The maximum number of instance recommendations to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub max_results: std::option::Option<i32>,
/// <p>An array of objects to specify a filter that returns a more specific list of instance
/// recommendations.</p>
pub filters: std::option::Option<std::vec::Vec<crate::model::Filter>>,
/// <p>The ID of the Amazon Web Services account for which to return instance
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return instance recommendations.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub account_ids: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>An object to specify the preferences for the Amazon EC2 instance
/// recommendations to return in the response.</p>
pub recommendation_preferences: std::option::Option<crate::model::RecommendationPreferences>,
}
impl GetEc2InstanceRecommendationsInput {
/// <p>The Amazon Resource Name (ARN) of the instances for which to return
/// recommendations.</p>
pub fn instance_arns(&self) -> std::option::Option<&[std::string::String]> {
self.instance_arns.as_deref()
}
/// <p>The token to advance to the next page of instance recommendations.</p>
pub fn next_token(&self) -> std::option::Option<&str> {
self.next_token.as_deref()
}
/// <p>The maximum number of instance recommendations to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn max_results(&self) -> std::option::Option<i32> {
self.max_results
}
/// <p>An array of objects to specify a filter that returns a more specific list of instance
/// recommendations.</p>
pub fn filters(&self) -> std::option::Option<&[crate::model::Filter]> {
self.filters.as_deref()
}
/// <p>The ID of the Amazon Web Services account for which to return instance
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return instance recommendations.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub fn account_ids(&self) -> std::option::Option<&[std::string::String]> {
self.account_ids.as_deref()
}
/// <p>An object to specify the preferences for the Amazon EC2 instance
/// recommendations to return in the response.</p>
pub fn recommendation_preferences(
&self,
) -> std::option::Option<&crate::model::RecommendationPreferences> {
self.recommendation_preferences.as_ref()
}
}
impl std::fmt::Debug for GetEc2InstanceRecommendationsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetEc2InstanceRecommendationsInput");
formatter.field("instance_arns", &self.instance_arns);
formatter.field("next_token", &self.next_token);
formatter.field("max_results", &self.max_results);
formatter.field("filters", &self.filters);
formatter.field("account_ids", &self.account_ids);
formatter.field(
"recommendation_preferences",
&self.recommendation_preferences,
);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetEbsVolumeRecommendationsInput {
/// <p>The Amazon Resource Name (ARN) of the volumes for which to return
/// recommendations.</p>
pub volume_arns: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The token to advance to the next page of volume recommendations.</p>
pub next_token: std::option::Option<std::string::String>,
/// <p>The maximum number of volume recommendations to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub max_results: std::option::Option<i32>,
/// <p>An array of objects to specify a filter that returns a more specific list of volume
/// recommendations.</p>
pub filters: std::option::Option<std::vec::Vec<crate::model::EbsFilter>>,
/// <p>The ID of the Amazon Web Services account for which to return volume
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return volume recommendations.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub account_ids: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl GetEbsVolumeRecommendationsInput {
/// <p>The Amazon Resource Name (ARN) of the volumes for which to return
/// recommendations.</p>
pub fn volume_arns(&self) -> std::option::Option<&[std::string::String]> {
self.volume_arns.as_deref()
}
/// <p>The token to advance to the next page of volume recommendations.</p>
pub fn next_token(&self) -> std::option::Option<&str> {
self.next_token.as_deref()
}
/// <p>The maximum number of volume recommendations to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn max_results(&self) -> std::option::Option<i32> {
self.max_results
}
/// <p>An array of objects to specify a filter that returns a more specific list of volume
/// recommendations.</p>
pub fn filters(&self) -> std::option::Option<&[crate::model::EbsFilter]> {
self.filters.as_deref()
}
/// <p>The ID of the Amazon Web Services account for which to return volume
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return volume recommendations.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub fn | (&self) -> std::option::Option<&[std::string::String]> {
self.account_ids.as_deref()
}
}
impl std::fmt::Debug for GetEbsVolumeRecommendationsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetEbsVolumeRecommendationsInput");
formatter.field("volume_arns", &self.volume_arns);
formatter.field("next_token", &self.next_token);
formatter.field("max_results", &self.max_results);
formatter.field("filters", &self.filters);
formatter.field("account_ids", &self.account_ids);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetAutoScalingGroupRecommendationsInput {
/// <p>The ID of the Amazon Web Services account for which to return Auto Scaling group
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return Auto Scaling group
/// recommendations.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub account_ids: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The Amazon Resource Name (ARN) of the Auto Scaling groups for which to return
/// recommendations.</p>
pub auto_scaling_group_arns: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The token to advance to the next page of Auto Scaling group
/// recommendations.</p>
pub next_token: std::option::Option<std::string::String>,
/// <p>The maximum number of Auto Scaling group recommendations to return with a single
/// request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub max_results: std::option::Option<i32>,
/// <p>An array of objects to specify a filter that returns a more specific list of Auto Scaling group recommendations.</p>
pub filters: std::option::Option<std::vec::Vec<crate::model::Filter>>,
/// <p>An object to specify the preferences for the Auto Scaling group recommendations
/// to return in the response.</p>
pub recommendation_preferences: std::option::Option<crate::model::RecommendationPreferences>,
}
impl GetAutoScalingGroupRecommendationsInput {
/// <p>The ID of the Amazon Web Services account for which to return Auto Scaling group
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to return Auto Scaling group
/// recommendations.</p>
///
/// <p>Only one account ID can be specified per request.</p>
pub fn account_ids(&self) -> std::option::Option<&[std::string::String]> {
self.account_ids.as_deref()
}
/// <p>The Amazon Resource Name (ARN) of the Auto Scaling groups for which to return
/// recommendations.</p>
pub fn auto_scaling_group_arns(&self) -> std::option::Option<&[std::string::String]> {
self.auto_scaling_group_arns.as_deref()
}
/// <p>The token to advance to the next page of Auto Scaling group
/// recommendations.</p>
pub fn next_token(&self) -> std::option::Option<&str> {
self.next_token.as_deref()
}
/// <p>The maximum number of Auto Scaling group recommendations to return with a single
/// request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn max_results(&self) -> std::option::Option<i32> {
self.max_results
}
/// <p>An array of objects to specify a filter that returns a more specific list of Auto Scaling group recommendations.</p>
pub fn filters(&self) -> std::option::Option<&[crate::model::Filter]> {
self.filters.as_deref()
}
/// <p>An object to specify the preferences for the Auto Scaling group recommendations
/// to return in the response.</p>
pub fn recommendation_preferences(
&self,
) -> std::option::Option<&crate::model::RecommendationPreferences> {
self.recommendation_preferences.as_ref()
}
}
impl std::fmt::Debug for GetAutoScalingGroupRecommendationsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetAutoScalingGroupRecommendationsInput");
formatter.field("account_ids", &self.account_ids);
formatter.field("auto_scaling_group_arns", &self.auto_scaling_group_arns);
formatter.field("next_token", &self.next_token);
formatter.field("max_results", &self.max_results);
formatter.field("filters", &self.filters);
formatter.field(
"recommendation_preferences",
&self.recommendation_preferences,
);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ExportLambdaFunctionRecommendationsInput {
/// <p>The IDs of the Amazon Web Services accounts for which to export Lambda
/// function recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to export recommendations.</p>
///
/// <p>This parameter cannot be specified together with the include member accounts
/// parameter. The parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the include member accounts parameter, is omitted.</p>
///
/// <p>You can specify multiple account IDs per request.</p>
pub account_ids: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>An array of objects to specify a filter that exports a more specific set of Lambda function recommendations.</p>
pub filters:
std::option::Option<std::vec::Vec<crate::model::LambdaFunctionRecommendationFilter>>,
/// <p>The recommendations data to include in the export file. For more information about the
/// fields that can be exported, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/exporting-recommendations.html#exported-files">Exported files</a> in the <i>Compute Optimizer User
/// Guide</i>.</p>
pub fields_to_export:
std::option::Option<std::vec::Vec<crate::model::ExportableLambdaFunctionField>>,
/// <p>Describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and
/// key prefix for a recommendations export job.</p>
///
/// <p>You must create the destination Amazon S3 bucket for your recommendations
/// export before you create the export job. Compute Optimizer does not create the S3 bucket
/// for you. After you create the S3 bucket, ensure that it has the required permission
/// policy to allow Compute Optimizer to write the export file to it. If you plan to specify
/// an object prefix when you create the export job, you must include the object prefix in
/// the policy that you add to the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/create-s3-bucket-policy-for-compute-optimizer.html">Amazon S3 Bucket Policy for Compute Optimizer</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
pub s3_destination_config: std::option::Option<crate::model::S3DestinationConfig>,
/// <p>The format of the export file.</p>
///
/// <p>The only export file format currently supported is <code>Csv</code>.</p>
pub file_format: std::option::Option<crate::model::FileFormat>,
/// <p>Indicates whether to include recommendations for resources in all member accounts of
/// the organization if your account is the management account of an organization.</p>
///
/// <p>The member accounts must also be opted in to Compute Optimizer, and trusted access for
/// Compute Optimizer must be enabled in the organization account. For more information,
/// see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/security-iam.html#trusted-service-access">Compute Optimizer and Amazon Web Services Organizations trusted access</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
///
/// <p>Recommendations for member accounts of the organization are not included in the export
/// file if this parameter is omitted.</p>
///
/// <p>This parameter cannot be specified together with the account IDs parameter. The
/// parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the account IDs parameter, is omitted.</p>
pub include_member_accounts: bool,
}
impl ExportLambdaFunctionRecommendationsInput {
/// <p>The IDs of the Amazon Web Services accounts for which to export Lambda
/// function recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to export recommendations.</p>
///
/// <p>This parameter cannot be specified together with the include member accounts
/// parameter. The parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the include member accounts parameter, is omitted.</p>
///
/// <p>You can specify multiple account IDs per request.</p>
pub fn account_ids(&self) -> std::option::Option<&[std::string::String]> {
self.account_ids.as_deref()
}
/// <p>An array of objects to specify a filter that exports a more specific set of Lambda function recommendations.</p>
pub fn filters(
&self,
) -> std::option::Option<&[crate::model::LambdaFunctionRecommendationFilter]> {
self.filters.as_deref()
}
/// <p>The recommendations data to include in the export file. For more information about the
/// fields that can be exported, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/exporting-recommendations.html#exported-files">Exported files</a> in the <i>Compute Optimizer User
/// Guide</i>.</p>
pub fn fields_to_export(
&self,
) -> std::option::Option<&[crate::model::ExportableLambdaFunctionField]> {
self.fields_to_export.as_deref()
}
/// <p>Describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and
/// key prefix for a recommendations export job.</p>
///
/// <p>You must create the destination Amazon S3 bucket for your recommendations
/// export before you create the export job. Compute Optimizer does not create the S3 bucket
/// for you. After you create the S3 bucket, ensure that it has the required permission
/// policy to allow Compute Optimizer to write the export file to it. If you plan to specify
/// an object prefix when you create the export job, you must include the object prefix in
/// the policy that you add to the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/create-s3-bucket-policy-for-compute-optimizer.html">Amazon S3 Bucket Policy for Compute Optimizer</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
pub fn s3_destination_config(&self) -> std::option::Option<&crate::model::S3DestinationConfig> {
self.s3_destination_config.as_ref()
}
/// <p>The format of the export file.</p>
///
/// <p>The only export file format currently supported is <code>Csv</code>.</p>
pub fn file_format(&self) -> std::option::Option<&crate::model::FileFormat> {
self.file_format.as_ref()
}
/// <p>Indicates whether to include recommendations for resources in all member accounts of
/// the organization if your account is the management account of an organization.</p>
///
/// <p>The member accounts must also be opted in to Compute Optimizer, and trusted access for
/// Compute Optimizer must be enabled in the organization account. For more information,
/// see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/security-iam.html#trusted-service-access">Compute Optimizer and Amazon Web Services Organizations trusted access</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
///
/// <p>Recommendations for member accounts of the organization are not included in the export
/// file if this parameter is omitted.</p>
///
/// <p>This parameter cannot be specified together with the account IDs parameter. The
/// parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the account IDs parameter, is omitted.</p>
pub fn include_member_accounts(&self) -> bool {
self.include_member_accounts
}
}
impl std::fmt::Debug for ExportLambdaFunctionRecommendationsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ExportLambdaFunctionRecommendationsInput");
formatter.field("account_ids", &self.account_ids);
formatter.field("filters", &self.filters);
formatter.field("fields_to_export", &self.fields_to_export);
formatter.field("s3_destination_config", &self.s3_destination_config);
formatter.field("file_format", &self.file_format);
formatter.field("include_member_accounts", &self.include_member_accounts);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ExportEc2InstanceRecommendationsInput {
/// <p>The IDs of the Amazon Web Services accounts for which to export instance
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to export recommendations.</p>
///
/// <p>This parameter cannot be specified together with the include member accounts
/// parameter. The parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the include member accounts parameter, is omitted.</p>
///
/// <p>You can specify multiple account IDs per request.</p>
pub account_ids: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>An array of objects to specify a filter that exports a more specific set of instance
/// recommendations.</p>
pub filters: std::option::Option<std::vec::Vec<crate::model::Filter>>,
/// <p>The recommendations data to include in the export file. For more information about the
/// fields that can be exported, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/exporting-recommendations.html#exported-files">Exported files</a> in the <i>Compute Optimizer User
/// Guide</i>.</p>
pub fields_to_export: std::option::Option<std::vec::Vec<crate::model::ExportableInstanceField>>,
/// <p>An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket
/// name and key prefix for the export job.</p>
///
/// <p>You must create the destination Amazon S3 bucket for your recommendations
/// export before you create the export job. Compute Optimizer does not create the S3 bucket
/// for you. After you create the S3 bucket, ensure that it has the required permissions
/// policy policy to allow Compute Optimizer to write the export file to it. If you plan to
/// specify an object prefix when you create the export job, you must include the object
/// prefix in the that you add to the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/create-s3-bucket-policy-for-compute-optimizer.html">Amazon S3 Bucket Policy for Compute Optimizer</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
pub s3_destination_config: std::option::Option<crate::model::S3DestinationConfig>,
/// <p>The format of the export file.</p>
///
/// <p>The only export file format currently supported is <code>Csv</code>.</p>
pub file_format: std::option::Option<crate::model::FileFormat>,
/// <p>Indicates whether to include recommendations for resources in all member accounts of
/// the organization if your account is the management account of an organization.</p>
///
/// <p>The member accounts must also be opted in to Compute Optimizer, and trusted access for
/// Compute Optimizer must be enabled in the organization account. For more information,
/// see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/security-iam.html#trusted-service-access">Compute Optimizer and Amazon Web Services Organizations trusted access</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
///
/// <p>Recommendations for member accounts of the organization are not included in the export
/// file if this parameter is omitted.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the account IDs parameter, is omitted.</p>
pub include_member_accounts: bool,
/// <p>An object to specify the preferences for the Amazon EC2 instance
/// recommendations to export.</p>
pub recommendation_preferences: std::option::Option<crate::model::RecommendationPreferences>,
}
impl ExportEc2InstanceRecommendationsInput {
/// <p>The IDs of the Amazon Web Services accounts for which to export instance
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to export recommendations.</p>
///
/// <p>This parameter cannot be specified together with the include member accounts
/// parameter. The parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the include member accounts parameter, is omitted.</p>
///
/// <p>You can specify multiple account IDs per request.</p>
pub fn account_ids(&self) -> std::option::Option<&[std::string::String]> {
self.account_ids.as_deref()
}
/// <p>An array of objects to specify a filter that exports a more specific set of instance
/// recommendations.</p>
pub fn filters(&self) -> std::option::Option<&[crate::model::Filter]> {
self.filters.as_deref()
}
/// <p>The recommendations data to include in the export file. For more information about the
/// fields that can be exported, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/exporting-recommendations.html#exported-files">Exported files</a> in the <i>Compute Optimizer User
/// Guide</i>.</p>
pub fn fields_to_export(
&self,
) -> std::option::Option<&[crate::model::ExportableInstanceField]> {
self.fields_to_export.as_deref()
}
/// <p>An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket
/// name and key prefix for the export job.</p>
///
/// <p>You must create the destination Amazon S3 bucket for your recommendations
/// export before you create the export job. Compute Optimizer does not create the S3 bucket
/// for you. After you create the S3 bucket, ensure that it has the required permissions
/// policy policy to allow Compute Optimizer to write the export file to it. If you plan to
/// specify an object prefix when you create the export job, you must include the object
/// prefix in the that you add to the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/create-s3-bucket-policy-for-compute-optimizer.html">Amazon S3 Bucket Policy for Compute Optimizer</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
pub fn s3_destination_config(&self) -> std::option::Option<&crate::model::S3DestinationConfig> {
self.s3_destination_config.as_ref()
}
/// <p>The format of the export file.</p>
///
/// <p>The only export file format currently supported is <code>Csv</code>.</p>
pub fn file_format(&self) -> std::option::Option<&crate::model::FileFormat> {
self.file_format.as_ref()
}
/// <p>Indicates whether to include recommendations for resources in all member accounts of
/// the organization if your account is the management account of an organization.</p>
///
/// <p>The member accounts must also be opted in to Compute Optimizer, and trusted access for
/// Compute Optimizer must be enabled in the organization account. For more information,
/// see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/security-iam.html#trusted-service-access">Compute Optimizer and Amazon Web Services Organizations trusted access</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
///
/// <p>Recommendations for member accounts of the organization are not included in the export
/// file if this parameter is omitted.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the account IDs parameter, is omitted.</p>
pub fn include_member_accounts(&self) -> bool {
self.include_member_accounts
}
/// <p>An object to specify the preferences for the Amazon EC2 instance
/// recommendations to export.</p>
pub fn recommendation_preferences(
&self,
) -> std::option::Option<&crate::model::RecommendationPreferences> {
self.recommendation_preferences.as_ref()
}
}
impl std::fmt::Debug for ExportEc2InstanceRecommendationsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ExportEc2InstanceRecommendationsInput");
formatter.field("account_ids", &self.account_ids);
formatter.field("filters", &self.filters);
formatter.field("fields_to_export", &self.fields_to_export);
formatter.field("s3_destination_config", &self.s3_destination_config);
formatter.field("file_format", &self.file_format);
formatter.field("include_member_accounts", &self.include_member_accounts);
formatter.field(
"recommendation_preferences",
&self.recommendation_preferences,
);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ExportEbsVolumeRecommendationsInput {
/// <p>The IDs of the Amazon Web Services accounts for which to export Amazon EBS
/// volume recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to export recommendations.</p>
///
/// <p>This parameter cannot be specified together with the include member accounts
/// parameter. The parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the include member accounts parameter, is omitted.</p>
///
/// <p>You can specify multiple account IDs per request.</p>
pub account_ids: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>An array of objects to specify a filter that exports a more specific set of Amazon EBS volume recommendations.</p>
pub filters: std::option::Option<std::vec::Vec<crate::model::EbsFilter>>,
/// <p>The recommendations data to include in the export file. For more information about the
/// fields that can be exported, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/exporting-recommendations.html#exported-files">Exported files</a> in the <i>Compute Optimizer User
/// Guide</i>.</p>
pub fields_to_export: std::option::Option<std::vec::Vec<crate::model::ExportableVolumeField>>,
/// <p>Describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and
/// key prefix for a recommendations export job.</p>
///
/// <p>You must create the destination Amazon S3 bucket for your recommendations
/// export before you create the export job. Compute Optimizer does not create the S3 bucket
/// for you. After you create the S3 bucket, ensure that it has the required permission
/// policy to allow Compute Optimizer to write the export file to it. If you plan to specify
/// an object prefix when you create the export job, you must include the object prefix in
/// the policy that you add to the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/create-s3-bucket-policy-for-compute-optimizer.html">Amazon S3 Bucket Policy for Compute Optimizer</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
pub s3_destination_config: std::option::Option<crate::model::S3DestinationConfig>,
/// <p>The format of the export file.</p>
///
/// <p>The only export file format currently supported is <code>Csv</code>.</p>
pub file_format: std::option::Option<crate::model::FileFormat>,
/// <p>Indicates whether to include recommendations for resources in all member accounts of
/// the organization if your account is the management account of an organization.</p>
///
/// <p>The member accounts must also be opted in to Compute Optimizer, and trusted access for
/// Compute Optimizer must be enabled in the organization account. For more information,
/// see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/security-iam.html#trusted-service-access">Compute Optimizer and Amazon Web Services Organizations trusted access</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
///
/// <p>Recommendations for member accounts of the organization are not included in the export
/// file if this parameter is omitted.</p>
///
/// <p>This parameter cannot be specified together with the account IDs parameter. The
/// parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the account IDs parameter, is omitted.</p>
pub include_member_accounts: bool,
}
impl ExportEbsVolumeRecommendationsInput {
/// <p>The IDs of the Amazon Web Services accounts for which to export Amazon EBS
/// volume recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to export recommendations.</p>
///
/// <p>This parameter cannot be specified together with the include member accounts
/// parameter. The parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the include member accounts parameter, is omitted.</p>
///
/// <p>You can specify multiple account IDs per request.</p>
pub fn account_ids(&self) -> std::option::Option<&[std::string::String]> {
self.account_ids.as_deref()
}
/// <p>An array of objects to specify a filter that exports a more specific set of Amazon EBS volume recommendations.</p>
pub fn filters(&self) -> std::option::Option<&[crate::model::EbsFilter]> {
self.filters.as_deref()
}
/// <p>The recommendations data to include in the export file. For more information about the
/// fields that can be exported, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/exporting-recommendations.html#exported-files">Exported files</a> in the <i>Compute Optimizer User
/// Guide</i>.</p>
pub fn fields_to_export(&self) -> std::option::Option<&[crate::model::ExportableVolumeField]> {
self.fields_to_export.as_deref()
}
/// <p>Describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and
/// key prefix for a recommendations export job.</p>
///
/// <p>You must create the destination Amazon S3 bucket for your recommendations
/// export before you create the export job. Compute Optimizer does not create the S3 bucket
/// for you. After you create the S3 bucket, ensure that it has the required permission
/// policy to allow Compute Optimizer to write the export file to it. If you plan to specify
/// an object prefix when you create the export job, you must include the object prefix in
/// the policy that you add to the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/create-s3-bucket-policy-for-compute-optimizer.html">Amazon S3 Bucket Policy for Compute Optimizer</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
pub fn s3_destination_config(&self) -> std::option::Option<&crate::model::S3DestinationConfig> {
self.s3_destination_config.as_ref()
}
/// <p>The format of the export file.</p>
///
/// <p>The only export file format currently supported is <code>Csv</code>.</p>
pub fn file_format(&self) -> std::option::Option<&crate::model::FileFormat> {
self.file_format.as_ref()
}
/// <p>Indicates whether to include recommendations for resources in all member accounts of
/// the organization if your account is the management account of an organization.</p>
///
/// <p>The member accounts must also be opted in to Compute Optimizer, and trusted access for
/// Compute Optimizer must be enabled in the organization account. For more information,
/// see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/security-iam.html#trusted-service-access">Compute Optimizer and Amazon Web Services Organizations trusted access</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
///
/// <p>Recommendations for member accounts of the organization are not included in the export
/// file if this parameter is omitted.</p>
///
/// <p>This parameter cannot be specified together with the account IDs parameter. The
/// parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the account IDs parameter, is omitted.</p>
pub fn include_member_accounts(&self) -> bool {
self.include_member_accounts
}
}
impl std::fmt::Debug for ExportEbsVolumeRecommendationsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ExportEbsVolumeRecommendationsInput");
formatter.field("account_ids", &self.account_ids);
formatter.field("filters", &self.filters);
formatter.field("fields_to_export", &self.fields_to_export);
formatter.field("s3_destination_config", &self.s3_destination_config);
formatter.field("file_format", &self.file_format);
formatter.field("include_member_accounts", &self.include_member_accounts);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ExportAutoScalingGroupRecommendationsInput {
/// <p>The IDs of the Amazon Web Services accounts for which to export Auto Scaling group
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to export recommendations.</p>
///
/// <p>This parameter cannot be specified together with the include member accounts
/// parameter. The parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the include member accounts parameter, is omitted.</p>
///
/// <p>You can specify multiple account IDs per request.</p>
pub account_ids: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>An array of objects to specify a filter that exports a more specific set of Auto Scaling group recommendations.</p>
pub filters: std::option::Option<std::vec::Vec<crate::model::Filter>>,
/// <p>The recommendations data to include in the export file. For more information about the
/// fields that can be exported, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/exporting-recommendations.html#exported-files">Exported files</a> in the <i>Compute Optimizer User
/// Guide</i>.</p>
pub fields_to_export:
std::option::Option<std::vec::Vec<crate::model::ExportableAutoScalingGroupField>>,
/// <p>An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket
/// name and key prefix for the export job.</p>
///
/// <p>You must create the destination Amazon S3 bucket for your recommendations
/// export before you create the export job. Compute Optimizer does not create the S3 bucket
/// for you. After you create the S3 bucket, ensure that it has the required permissions
/// policy to allow Compute Optimizer to write the export file to it. If you plan to
/// specify an object prefix when you create the export job, you must include the object
/// prefix in the policy that you add to the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/create-s3-bucket-policy-for-compute-optimizer.html">Amazon S3 Bucket Policy for Compute Optimizer</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
pub s3_destination_config: std::option::Option<crate::model::S3DestinationConfig>,
/// <p>The format of the export file.</p>
///
/// <p>The only export file format currently supported is <code>Csv</code>.</p>
pub file_format: std::option::Option<crate::model::FileFormat>,
/// <p>Indicates whether to include recommendations for resources in all member accounts of
/// the organization if your account is the management account of an organization.</p>
///
/// <p>The member accounts must also be opted in to Compute Optimizer, and trusted access for
/// Compute Optimizer must be enabled in the organization account. For more information,
/// see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/security-iam.html#trusted-service-access">Compute Optimizer and Amazon Web Services Organizations trusted access</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
///
/// <p>Recommendations for member accounts of the organization are not included in the export
/// file if this parameter is omitted.</p>
///
/// <p>This parameter cannot be specified together with the account IDs parameter. The
/// parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the account IDs parameter, is omitted.</p>
pub include_member_accounts: bool,
/// <p>An object to specify the preferences for the Auto Scaling group recommendations
/// to export.</p>
pub recommendation_preferences: std::option::Option<crate::model::RecommendationPreferences>,
}
impl ExportAutoScalingGroupRecommendationsInput {
/// <p>The IDs of the Amazon Web Services accounts for which to export Auto Scaling group
/// recommendations.</p>
///
/// <p>If your account is the management account of an organization, use this parameter to
/// specify the member account for which you want to export recommendations.</p>
///
/// <p>This parameter cannot be specified together with the include member accounts
/// parameter. The parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the include member accounts parameter, is omitted.</p>
///
/// <p>You can specify multiple account IDs per request.</p>
pub fn account_ids(&self) -> std::option::Option<&[std::string::String]> {
self.account_ids.as_deref()
}
/// <p>An array of objects to specify a filter that exports a more specific set of Auto Scaling group recommendations.</p>
pub fn filters(&self) -> std::option::Option<&[crate::model::Filter]> {
self.filters.as_deref()
}
/// <p>The recommendations data to include in the export file. For more information about the
/// fields that can be exported, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/exporting-recommendations.html#exported-files">Exported files</a> in the <i>Compute Optimizer User
/// Guide</i>.</p>
pub fn fields_to_export(
&self,
) -> std::option::Option<&[crate::model::ExportableAutoScalingGroupField]> {
self.fields_to_export.as_deref()
}
/// <p>An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket
/// name and key prefix for the export job.</p>
///
/// <p>You must create the destination Amazon S3 bucket for your recommendations
/// export before you create the export job. Compute Optimizer does not create the S3 bucket
/// for you. After you create the S3 bucket, ensure that it has the required permissions
/// policy to allow Compute Optimizer to write the export file to it. If you plan to
/// specify an object prefix when you create the export job, you must include the object
/// prefix in the policy that you add to the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/create-s3-bucket-policy-for-compute-optimizer.html">Amazon S3 Bucket Policy for Compute Optimizer</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
pub fn s3_destination_config(&self) -> std::option::Option<&crate::model::S3DestinationConfig> {
self.s3_destination_config.as_ref()
}
/// <p>The format of the export file.</p>
///
/// <p>The only export file format currently supported is <code>Csv</code>.</p>
pub fn file_format(&self) -> std::option::Option<&crate::model::FileFormat> {
self.file_format.as_ref()
}
/// <p>Indicates whether to include recommendations for resources in all member accounts of
/// the organization if your account is the management account of an organization.</p>
///
/// <p>The member accounts must also be opted in to Compute Optimizer, and trusted access for
/// Compute Optimizer must be enabled in the organization account. For more information,
/// see <a href="https://docs.aws.amazon.com/compute-optimizer/latest/ug/security-iam.html#trusted-service-access">Compute Optimizer and Amazon Web Services Organizations trusted access</a> in the
/// <i>Compute Optimizer User Guide</i>.</p>
///
/// <p>Recommendations for member accounts of the organization are not included in the export
/// file if this parameter is omitted.</p>
///
/// <p>This parameter cannot be specified together with the account IDs parameter. The
/// parameters are mutually exclusive.</p>
///
/// <p>Recommendations for member accounts are not included in the export if this parameter,
/// or the account IDs parameter, is omitted.</p>
pub fn include_member_accounts(&self) -> bool {
self.include_member_accounts
}
/// <p>An object to specify the preferences for the Auto Scaling group recommendations
/// to export.</p>
pub fn recommendation_preferences(
&self,
) -> std::option::Option<&crate::model::RecommendationPreferences> {
self.recommendation_preferences.as_ref()
}
}
impl std::fmt::Debug for ExportAutoScalingGroupRecommendationsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ExportAutoScalingGroupRecommendationsInput");
formatter.field("account_ids", &self.account_ids);
formatter.field("filters", &self.filters);
formatter.field("fields_to_export", &self.fields_to_export);
formatter.field("s3_destination_config", &self.s3_destination_config);
formatter.field("file_format", &self.file_format);
formatter.field("include_member_accounts", &self.include_member_accounts);
formatter.field(
"recommendation_preferences",
&self.recommendation_preferences,
);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeRecommendationExportJobsInput {
/// <p>The identification numbers of the export jobs to return.</p>
///
/// <p>An export job ID is returned when you create an export using the <a>ExportAutoScalingGroupRecommendations</a> or <a>ExportEC2InstanceRecommendations</a> actions.</p>
///
/// <p>All export jobs created in the last seven days are returned if this parameter is
/// omitted.</p>
pub job_ids: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>An array of objects to specify a filter that returns a more specific list of export
/// jobs.</p>
pub filters: std::option::Option<std::vec::Vec<crate::model::JobFilter>>,
/// <p>The token to advance to the next page of export jobs.</p>
pub next_token: std::option::Option<std::string::String>,
/// <p>The maximum number of export jobs to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub max_results: std::option::Option<i32>,
}
impl DescribeRecommendationExportJobsInput {
/// <p>The identification numbers of the export jobs to return.</p>
///
/// <p>An export job ID is returned when you create an export using the <a>ExportAutoScalingGroupRecommendations</a> or <a>ExportEC2InstanceRecommendations</a> actions.</p>
///
/// <p>All export jobs created in the last seven days are returned if this parameter is
/// omitted.</p>
pub fn job_ids(&self) -> std::option::Option<&[std::string::String]> {
self.job_ids.as_deref()
}
/// <p>An array of objects to specify a filter that returns a more specific list of export
/// jobs.</p>
pub fn filters(&self) -> std::option::Option<&[crate::model::JobFilter]> {
self.filters.as_deref()
}
/// <p>The token to advance to the next page of export jobs.</p>
pub fn next_token(&self) -> std::option::Option<&str> {
self.next_token.as_deref()
}
/// <p>The maximum number of export jobs to return with a single request.</p>
///
/// <p>To retrieve the remaining results, make another request with the returned
/// <code>nextToken</code> value.</p>
pub fn max_results(&self) -> std::option::Option<i32> {
self.max_results
}
}
impl std::fmt::Debug for DescribeRecommendationExportJobsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeRecommendationExportJobsInput");
formatter.field("job_ids", &self.job_ids);
formatter.field("filters", &self.filters);
formatter.field("next_token", &self.next_token);
formatter.field("max_results", &self.max_results);
formatter.finish()
}
}
| account_ids |
formatter.rs | use super::{ModuleSet, Symbol};
use crate::ast;
use crate::formatting::ContextualDisplay as _;
use derive_new::new;
use std::borrow::Cow;
use std::fmt;
#[derive(Debug, new)]
pub struct Formatter<M> {
map: M,
}
impl<M: ModuleSet> Formatter<M> {
pub fn symbol_of(&self, construct: impl Into<ast::Construct>) -> Option<&Symbol> {
self.map.symbol_of(construct)
}
pub fn name_of(&self, construct: impl Into<ast::Construct>) -> Cow<str> {
let construct = construct.into();
self.symbol_of(construct).map_or_else(
|| construct.to_string().into(),
|unit| unit.name.as_str().into(),
)
}
}
impl<M: ModuleSet> ast::KindFormatter for Formatter<M> {
fn fmt_kind_use(&self, f: &mut fmt::Formatter<'_>, id: ast::Use<ast::KindUse>) -> fmt::Result {
write!(f, "{}", self.name_of(id))
}
}
impl<M: ModuleSet> ast::TypeFormatter for Formatter<M> {
fn fmt_type_use(&self, f: &mut fmt::Formatter<'_>, id: ast::Use<ast::TypeUse>) -> fmt::Result {
write!(f, "{}", self.name_of(id))
}
fn fmt_data_type_con(
&self,
f: &mut fmt::Formatter<'_>,
id: ast::NodeId<ast::DataTypeCon>,
) -> fmt::Result {
write!(f, "{}", self.name_of(id))
}
fn fmt_builtin_type_con(
&self,
f: &mut fmt::Formatter<'_>,
id: ast::NodeId<ast::BuiltinTypeCon>,
) -> fmt::Result {
write!(f, "{}", self.name_of(id))
}
fn fmt_type_parameter(
&self,
f: &mut fmt::Formatter<'_>,
id: ast::NodeId<ast::TypeParameter>,
) -> fmt::Result {
write!(f, "{}", self.name_of(id))
}
fn fmt_class_use(
&self,
f: &mut fmt::Formatter<'_>,
id: ast::Use<ast::NodeId<ast::ClassCon>>,
) -> fmt::Result {
write!(f, "{}", self.name_of(id))
}
}
impl<M: ModuleSet> ast::ValueConFormatter for Formatter<M> {
fn fmt_data_value_con(
&self,
f: &mut fmt::Formatter<'_>,
id: ast::NodeId<ast::DataValueCon>,
) -> fmt::Result {
if id == ast::builtin::TRUE | else if id == ast::builtin::FALSE {
write!(f, "#f")
} else {
write!(f, "{}", self.name_of(id))
}
}
fn fmt_builtin_value_con(
&self,
f: &mut fmt::Formatter<'_>,
id: ast::NodeId<ast::BuiltinValueCon>,
) -> fmt::Result {
write!(f, "{}", self.name_of(id))
}
}
impl<M: ModuleSet> ast::PatternFormatter for Formatter<M> {
fn fmt_pattern_var(
&self,
f: &mut fmt::Formatter<'_>,
id: ast::NodeId<ast::PatternVar>,
) -> fmt::Result {
write!(f, "{}", self.name_of(id))
}
fn fmt_value_con_use(
&self,
f: &mut fmt::Formatter<'_>,
use_: ast::Use<ast::ValueCon>,
) -> fmt::Result {
match use_ {
ast::Use::Unresolved(id) => write!(f, "{}", self.name_of(id)),
ast::Use::Resolved(con, _) => write!(f, "{}", con.fmt_on(self)),
}
}
}
| {
write!(f, "#t")
} |
bundleinstance.py | # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
from hashlib import sha1
import hmac
import base64
import datetime
from awscli.compat import six
from awscli.arguments import CustomArgument
logger = logging.getLogger('ec2bundleinstance')
# This customization adds the following scalar parameters to the
# bundle-instance operation:
# --bucket:
BUCKET_DOCS = ('The bucket in which to store the AMI. '
'You can specify a bucket that you already own or '
'a new bucket that Amazon EC2 creates on your behalf. '
'If you specify a bucket that belongs to someone else, '
'Amazon EC2 returns an error.')
# --prefix:
PREFIX_DOCS = ('The prefix for the image component names being stored '
'in Amazon S3.')
# --owner-akid
OWNER_AKID_DOCS = 'The access key ID of the owner of the Amazon S3 bucket.'
# --policy
POLICY_DOCS = (
"An Amazon S3 upload policy that gives "
"Amazon EC2 permission to upload items into Amazon S3 "
"on the user's behalf. If you provide this parameter, "
"you must also provide "
"your secret access key, so we can create a policy "
"signature for you (the secret access key is not passed "
"to Amazon EC2). If you do not provide this parameter, "
"we generate an upload policy for you automatically. "
"For more information about upload policies see the "
"sections about policy construction and signatures in the "
'<a href="http://docs.aws.amazon.com/AmazonS3/latest/dev'
'/HTTPPOSTForms.html">'
'Amazon Simple Storage Service Developer Guide</a>.')
# --owner-sak
OWNER_SAK_DOCS = ('The AWS secret access key for the owner of the '
'Amazon S3 bucket specified in the --bucket '
'parameter. This parameter is required so that a '
'signature can be computed for the policy.')
def _add_params(argument_table, **kwargs):
# Add the scalar parameters and also change the complex storage
# param to not be required so the user doesn't get an error from
# argparse if they only supply scalar params.
storage_arg = argument_table['storage']
storage_arg.required = False
arg = BundleArgument(storage_param='Bucket',
name='bucket',
help_text=BUCKET_DOCS)
argument_table['bucket'] = arg
arg = BundleArgument(storage_param='Prefix',
name='prefix',
help_text=PREFIX_DOCS)
argument_table['prefix'] = arg
arg = BundleArgument(storage_param='AWSAccessKeyId',
name='owner-akid',
help_text=OWNER_AKID_DOCS)
argument_table['owner-akid'] = arg
arg = BundleArgument(storage_param='_SAK',
name='owner-sak',
help_text=OWNER_SAK_DOCS)
argument_table['owner-sak'] = arg
arg = BundleArgument(storage_param='UploadPolicy',
name='policy',
help_text=POLICY_DOCS)
argument_table['policy'] = arg
def _check_args(parsed_args, **kwargs):
# This function checks the parsed args. If the user specified
# the --ip-permissions option with any of the scalar options we
# raise an error.
logger.debug(parsed_args)
arg_dict = vars(parsed_args)
if arg_dict['storage']:
for key in ('bucket', 'prefix', 'owner_akid',
'owner_sak', 'policy'):
if arg_dict[key]:
msg = ('Mixing the --storage option '
'with the simple, scalar options is '
'not recommended.')
raise ValueError(msg)
POLICY = ('{{"expiration": "{expires}",'
'"conditions": ['
'{{"bucket": "{bucket}"}},'
'{{"acl": "ec2-bundle-read"}},'
'["starts-with", "$key", "{prefix}"]'
']}}'
)
def _generate_policy(params):
# Called if there is no policy supplied by the user.
# Creates a policy that provides access for 24 hours.
delta = datetime.timedelta(hours=24)
expires = datetime.datetime.utcnow() + delta
expires_iso = expires.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
policy = POLICY.format(expires=expires_iso,
bucket=params['Bucket'],
prefix=params['Prefix'])
params['UploadPolicy'] = policy
def _generate_signature(params):
# If we have a policy and a sak, create the signature.
policy = params.get('UploadPolicy')
sak = params.get('_SAK')
if policy and sak:
policy = base64.b64encode(six.b(policy)).decode('utf-8')
new_hmac = hmac.new(sak.encode('utf-8'), digestmod=sha1)
new_hmac.update(six.b(policy))
ps = base64.encodestring(new_hmac.digest()).strip().decode('utf-8')
params['UploadPolicySignature'] = ps
del params['_SAK']
def _check_params(params, **kwargs):
# Called just before call but prior to building the params.
# Adds information not supplied by the user.
storage = params['Storage']['S3']
if 'UploadPolicy' not in storage:
_generate_policy(storage)
if 'UploadPolicySignature' not in storage:
_generate_signature(storage)
EVENTS = [
('building-argument-table.ec2.bundle-instance', _add_params),
('operation-args-parsed.ec2.bundle-instance', _check_args),
('before-parameter-build.ec2.BundleInstance', _check_params),
]
def register_bundleinstance(event_handler):
# Register all of the events for customizing BundleInstance
for event, handler in EVENTS:
event_handler.register(event, handler)
class BundleArgument(CustomArgument):
def | (self, storage_param, *args, **kwargs):
super(BundleArgument, self).__init__(*args, **kwargs)
self._storage_param = storage_param
def _build_storage(self, params, value):
# Build up the Storage data structure
if 'Storage' not in params:
params['Storage'] = {'S3': {}}
params['Storage']['S3'][self._storage_param] = value
def add_to_params(self, parameters, value):
if value:
self._build_storage(parameters, value)
| __init__ |
Choice.tsx | import * as React from 'react';
import {classNames} from '@shopify/react-utilities/styles';
import {Error} from '../../types';
import InlineError from '../InlineError';
import styles from './Choice.scss';
export interface Props {
/** A unique identifier for the choice */
id: string;
/** Label for the choice */
label: React.ReactNode;
/** Whether the associated form control is disabled */
disabled?: Boolean;
/** Display an error message */
error?: Error | boolean;
/** Visually hide the label */
labelHidden?: boolean;
/** Content to display inside the choice */
children?: React.ReactNode;
/** Additional text to aide in use */
helpText?: React.ReactNode;
/** Callback when clicked */
onClick?(): void;
}
export default function Choice({
id,
label,
disabled,
error,
children,
labelHidden,
helpText,
onClick,
}: Props) {
function handleClick() {
if (disabled || !onClick) return;
onClick();
}
const className = classNames(
styles.Choice,
labelHidden && styles.labelHidden,
disabled && styles.disabled,
);
const labelMarkup = (
<label className={className} htmlFor={id} onClick={handleClick}>
<span className={styles.Control}>{children}</span>
<span className={styles.Label}>{label}</span>
</label>
);
const helpTextMarkup = helpText ? (
<div className={styles.HelpText} id={helpTextID(id)}>
{helpText}
</div>
) : null;
const errorMarkup = error &&
typeof error !== 'boolean' && (
<div className={styles.Error}>
<InlineError message={error} fieldID={id} />
</div>
);
const descriptionMarkup =
helpTextMarkup || errorMarkup ? (
<div className={styles.Descriptions}>
{errorMarkup}
{helpTextMarkup}
</div>
) : null;
return descriptionMarkup ? (
<div>
{labelMarkup}
{descriptionMarkup}
</div>
) : (
labelMarkup
);
}
export function | (id: string) {
return `${id}HelpText`;
}
| helpTextID |
docs.tsx | import React from "react" | import Layout from "../components/layout/layout"
import SEO from "../components/seo"
import styles from '../styles/index.module.css'
import anapred from '../images/anapred.png';
import { Container, Typography } from "@material-ui/core"
import Button from "../app/components/Button";
import { navigate } from "gatsby";
import { APP_BASE_ROUTE } from "../app/routes/routes";
const IndexPage = () => (
<Layout>
<SEO title="DOCS" />
<Container className={styles.wrapper}>
<Typography variant="h1" id={styles.pageTitle}>DOCUMENTATION</Typography>
<img src={anapred} alt="anapred" width="100%" id={styles.anapredImage}></img>
<Container className={styles.buttons}>
<Button text="GO TO APP" onClick ={e=> navigate(APP_BASE_ROUTE)}></Button>
</Container>
</Container>
</Layout>
)
export default IndexPage | |
todo_args.rs | use clap::ArgMatches;
use std::str::FromStr;
use crate::model::{Priority, SmartDate};
#[derive(Debug)]
pub struct ToDoArgs {
pub list: Option<String>,
pub mark_for_today: bool,
pub priority: Option<String>,
pub due_date: Option<String>,
pub free_args: Vec<String>,
}
impl ToDoArgs {
pub fn parse_priority(&self) -> Result<Option<Priority>, String> {
match &self.priority {
None => Ok(None),
Some(input) => Priority::from_str(input.as_str()).map(Some),
}
}
pub fn parse_due_date(self: &Self) -> Result<Option<SmartDate>, String> {
match &self.due_date {
None => Ok(None),
Some(input) => SmartDate::from_str(input).map(Some),
}
}
pub fn | (args: &ArgMatches) -> ToDoArgs {
let extract = |arg: &str| arg.get(1..arg.len()).map(|a| a.to_string());
let mut list = None;
let mut priority = None;
let mut due_date = None;
let mut free_args = vec![];
let mark_for_today = args.is_present("today");
args.values_of("INPUT").unwrap().for_each(|arg| {
if arg.starts_with(':') && arg.len() > 1 {
list = extract(arg);
} else if arg.starts_with('+') && arg.len() > 1 {
priority = extract(arg);
} else if arg.starts_with('@') && arg.len() > 1 {
due_date = extract(arg);
} else {
free_args.push(arg.to_string());
}
});
ToDoArgs {
list,
mark_for_today,
priority,
due_date,
free_args,
}
}
}
#[cfg(test)]
mod test {
use crate::command::todo_args::ToDoArgs;
use crate::model::SmartDate;
use std::str::FromStr;
#[test]
fn test_parse_due_date() {
let mut add = ToDoArgs {
list: None,
mark_for_today: false,
due_date: Some("20200202".to_string()),
priority: None,
free_args: vec![],
};
assert_eq!(
add.parse_due_date().unwrap(),
Some(SmartDate::from_str("20200202").unwrap())
);
add.due_date = None;
assert_eq!(add.parse_due_date().unwrap(), None);
}
}
| parse |
Query.ts | limit?: number;
offset?: number;
} | export interface IQuery {
sort?: object;
filter?: any; |
|
f787.go | package internal
import (
"math"
"unsafe"
)
func | (ctx *Context, l0 int32, l1 int32, l2 float32, l3 float32, l4 float32, l5 float32) {
var l6 int32
_ = l6
var l7 int32
_ = l7
var l8 int32
_ = l8
var l9 int32
_ = l9
var l10 float32
_ = l10
var l11 float32
_ = l11
var l12 float32
_ = l12
var l13 float32
_ = l13
var l14 float32
_ = l14
var l15 float32
_ = l15
var s0i32 int32
_ = s0i32
var s1i32 int32
_ = s1i32
var s2i32 int32
_ = s2i32
var s3i32 int32
_ = s3i32
var s4i32 int32
_ = s4i32
var s5i32 int32
_ = s5i32
var s6i32 int32
_ = s6i32
var s7i32 int32
_ = s7i32
var s0f32 float32
_ = s0f32
var s1f32 float32
_ = s1f32
var s2f32 float32
_ = s2f32
var s3f32 float32
_ = s3f32
var s4f32 float32
_ = s4f32
var s5f32 float32
_ = s5f32
var s6f32 float32
_ = s6f32
var s7f32 float32
_ = s7f32
var s8f32 float32
_ = s8f32
s0f32 = l4
s0i32 = int32(math.Float32bits(s0f32))
l8 = s0i32
s1i32 = 2147483647
s0i32 = s0i32 & s1i32
l9 = s0i32
s0f32 = float32(uint32(s0i32))
s1f32 = 1.1920929e-07
s0f32 = s0f32 * s1f32
s1f32 = -124.22552
s0f32 = s0f32 + s1f32
s1i32 = l8
s2i32 = 8388607
s1i32 = s1i32 & s2i32
s2i32 = 1056964608
s1i32 = s1i32 | s2i32
s1f32 = math.Float32frombits(uint32(s1i32))
l4 = s1f32
s2f32 = 1.4980303
s1f32 = s1f32 * s2f32
s0f32 = s0f32 - s1f32
s1f32 = 1.72588
s2f32 = l4
s3f32 = 0.35208872
s2f32 = s2f32 + s3f32
s1f32 = s1f32 / s2f32
s0f32 = s0f32 - s1f32
s1i32 = l1
s1i32 = *(*int32)(unsafe.Pointer(&ctx.Mem[int(s1i32+0)]))
l7 = s1i32
s1f32 = *(*float32)(unsafe.Pointer(&ctx.Mem[int(s1i32+8)]))
l10 = s1f32
s0f32 = s0f32 * s1f32
l4 = s0f32
s1f32 = 121.274055
s0f32 = s0f32 + s1f32
s1f32 = l4
s2f32 = l4
s2f32 = float32(math.Floor(float64(s2f32)))
s1f32 = s1f32 - s2f32
l4 = s1f32
s2f32 = 1.4901291
s1f32 = s1f32 * s2f32
s0f32 = s0f32 - s1f32
s1f32 = 27.728024
s2f32 = 4.8425255
s3f32 = l4
s2f32 = s2f32 - s3f32
s1f32 = s1f32 / s2f32
s0f32 = s0f32 + s1f32
s1f32 = 8.388608e+06
s0f32 = s0f32 * s1f32
s1f32 = 0.5
s0f32 = s0f32 + s1f32
l4 = s0f32
s1f32 = 4.2949673e+09
if s0f32 < s1f32 {
s0i32 = 1
} else {
s0i32 = 0
}
s1f32 = l4
s2f32 = 0
if s1f32 >= s2f32 {
s1i32 = 1
} else {
s1i32 = 0
}
s0i32 = s0i32 & s1i32
if s0i32 != 0 {
s0f32 = l4
s0i32 = int32(uint32(math.Trunc(float64(s0f32))))
goto lbl0
}
s0i32 = 0
lbl0:
l6 = s0i32
s0i32 = l7
s0f32 = *(*float32)(unsafe.Pointer(&ctx.Mem[int(s0i32+4)]))
l12 = s0f32
s1i32 = l6
s1f32 = math.Float32frombits(uint32(s1i32))
s2i32 = l9
s2f32 = math.Float32frombits(uint32(s2i32))
l4 = s2f32
s3f32 = l4
s4f32 = 1
if s3f32 != s4f32 {
s3i32 = 1
} else {
s3i32 = 0
}
if s3i32 != 0 {
// s1f32 = s1f32
} else {
s1f32 = s2f32
}
s2f32 = l4
s3f32 = l4
s4f32 = 0
if s3f32 != s4f32 {
s3i32 = 1
} else {
s3i32 = 0
}
if s3i32 != 0 {
// s1f32 = s1f32
} else {
s1f32 = s2f32
}
s0f32 = s0f32 * s1f32
s1i32 = l7
s1f32 = *(*float32)(unsafe.Pointer(&ctx.Mem[int(s1i32+20)]))
l13 = s1f32
s2i32 = l7
s2f32 = *(*float32)(unsafe.Pointer(&ctx.Mem[int(s2i32+12)]))
l14 = s2f32
s3f32 = l4
s4i32 = l7
s4f32 = *(*float32)(unsafe.Pointer(&ctx.Mem[int(s4i32+16)]))
l15 = s4f32
s3f32 = s3f32 - s4f32
s3i32 = int32(math.Float32bits(s3f32))
l6 = s3i32
s3f32 = float32(uint32(s3i32))
s4f32 = 1.1920929e-07
s3f32 = s3f32 * s4f32
s4f32 = -124.22552
s3f32 = s3f32 + s4f32
s4i32 = l6
s5i32 = 8388607
s4i32 = s4i32 & s5i32
s5i32 = 1056964608
s4i32 = s4i32 | s5i32
s4f32 = math.Float32frombits(uint32(s4i32))
l11 = s4f32
s5f32 = 1.4980303
s4f32 = s4f32 * s5f32
s3f32 = s3f32 - s4f32
s4f32 = 1.72588
s5f32 = l11
s6f32 = 0.35208872
s5f32 = s5f32 + s6f32
s4f32 = s4f32 / s5f32
s3f32 = s3f32 - s4f32
s4f32 = 0.6931472
s3f32 = s3f32 * s4f32
s2f32 = s2f32 * s3f32
s1f32 = s1f32 + s2f32
s2f32 = l4
s3f32 = 1
if s2f32 <= s3f32 {
s2i32 = 1
} else {
s2i32 = 0
}
if s2i32 != 0 {
// s0f32 = s0f32
} else {
s0f32 = s1f32
}
s0i32 = int32(math.Float32bits(s0f32))
s1i32 = l8
s2i32 = -2147483648
s1i32 = s1i32 & s2i32
s0i32 = s0i32 | s1i32
s0f32 = math.Float32frombits(uint32(s0i32))
l4 = s0f32
s0f32 = l12
s1f32 = l3
s1i32 = int32(math.Float32bits(s1f32))
l7 = s1i32
s2i32 = 2147483647
s1i32 = s1i32 & s2i32
l9 = s1i32
s1f32 = float32(uint32(s1i32))
s2f32 = 1.1920929e-07
s1f32 = s1f32 * s2f32
s2f32 = -124.22552
s1f32 = s1f32 + s2f32
s2i32 = l7
s3i32 = 8388607
s2i32 = s2i32 & s3i32
s3i32 = 1056964608
s2i32 = s2i32 | s3i32
s2f32 = math.Float32frombits(uint32(s2i32))
l3 = s2f32
s3f32 = 1.4980303
s2f32 = s2f32 * s3f32
s1f32 = s1f32 - s2f32
s2f32 = 1.72588
s3f32 = l3
s4f32 = 0.35208872
s3f32 = s3f32 + s4f32
s2f32 = s2f32 / s3f32
s1f32 = s1f32 - s2f32
s2f32 = l10
s1f32 = s1f32 * s2f32
l3 = s1f32
s2f32 = 121.274055
s1f32 = s1f32 + s2f32
s2f32 = l3
s3f32 = l3
s3f32 = float32(math.Floor(float64(s3f32)))
s2f32 = s2f32 - s3f32
l3 = s2f32
s3f32 = 1.4901291
s2f32 = s2f32 * s3f32
s1f32 = s1f32 - s2f32
s2f32 = 27.728024
s3f32 = 4.8425255
s4f32 = l3
s3f32 = s3f32 - s4f32
s2f32 = s2f32 / s3f32
s1f32 = s1f32 + s2f32
s2f32 = 8.388608e+06
s1f32 = s1f32 * s2f32
s2f32 = 0.5
s1f32 = s1f32 + s2f32
l3 = s1f32
s2f32 = 4.2949673e+09
if s1f32 < s2f32 {
s1i32 = 1
} else {
s1i32 = 0
}
s2f32 = l3
s3f32 = 0
if s2f32 >= s3f32 {
s2i32 = 1
} else {
s2i32 = 0
}
s1i32 = s1i32 & s2i32
if s1i32 != 0 {
s1f32 = l3
s1i32 = int32(uint32(math.Trunc(float64(s1f32))))
goto lbl2
}
s1i32 = 0
lbl2:
s1f32 = math.Float32frombits(uint32(s1i32))
s2i32 = l9
s2f32 = math.Float32frombits(uint32(s2i32))
l3 = s2f32
s3f32 = l3
s4f32 = 1
if s3f32 != s4f32 {
s3i32 = 1
} else {
s3i32 = 0
}
if s3i32 != 0 {
// s1f32 = s1f32
} else {
s1f32 = s2f32
}
s2f32 = l3
s3f32 = l3
s4f32 = 0
if s3f32 != s4f32 {
s3i32 = 1
} else {
s3i32 = 0
}
if s3i32 != 0 {
// s1f32 = s1f32
} else {
s1f32 = s2f32
}
s0f32 = s0f32 * s1f32
s1f32 = l13
s2f32 = l14
s3f32 = l3
s4f32 = l15
s3f32 = s3f32 - s4f32
s3i32 = int32(math.Float32bits(s3f32))
l6 = s3i32
s3f32 = float32(uint32(s3i32))
s4f32 = 1.1920929e-07
s3f32 = s3f32 * s4f32
s4f32 = -124.22552
s3f32 = s3f32 + s4f32
s4i32 = l6
s5i32 = 8388607
s4i32 = s4i32 & s5i32
s5i32 = 1056964608
s4i32 = s4i32 | s5i32
s4f32 = math.Float32frombits(uint32(s4i32))
l11 = s4f32
s5f32 = 1.4980303
s4f32 = s4f32 * s5f32
s3f32 = s3f32 - s4f32
s4f32 = 1.72588
s5f32 = l11
s6f32 = 0.35208872
s5f32 = s5f32 + s6f32
s4f32 = s4f32 / s5f32
s3f32 = s3f32 - s4f32
s4f32 = 0.6931472
s3f32 = s3f32 * s4f32
s2f32 = s2f32 * s3f32
s1f32 = s1f32 + s2f32
s2f32 = l3
s3f32 = 1
if s2f32 <= s3f32 {
s2i32 = 1
} else {
s2i32 = 0
}
if s2i32 != 0 {
// s0f32 = s0f32
} else {
s0f32 = s1f32
}
s0i32 = int32(math.Float32bits(s0f32))
s1i32 = l7
s2i32 = -2147483648
s1i32 = s1i32 & s2i32
s0i32 = s0i32 | s1i32
s0f32 = math.Float32frombits(uint32(s0i32))
l3 = s0f32
s0i32 = l0
s1i32 = l1
s2i32 = 8
s1i32 = s1i32 + s2i32
s2f32 = l12
s3f32 = l2
s3i32 = int32(math.Float32bits(s3f32))
l6 = s3i32
s4i32 = 2147483647
s3i32 = s3i32 & s4i32
l8 = s3i32
s3f32 = float32(uint32(s3i32))
s4f32 = 1.1920929e-07
s3f32 = s3f32 * s4f32
s4f32 = -124.22552
s3f32 = s3f32 + s4f32
s4i32 = l6
s5i32 = 8388607
s4i32 = s4i32 & s5i32
s5i32 = 1056964608
s4i32 = s4i32 | s5i32
s4f32 = math.Float32frombits(uint32(s4i32))
l2 = s4f32
s5f32 = 1.4980303
s4f32 = s4f32 * s5f32
s3f32 = s3f32 - s4f32
s4f32 = 1.72588
s5f32 = l2
s6f32 = 0.35208872
s5f32 = s5f32 + s6f32
s4f32 = s4f32 / s5f32
s3f32 = s3f32 - s4f32
s4f32 = l10
s3f32 = s3f32 * s4f32
l2 = s3f32
s4f32 = 121.274055
s3f32 = s3f32 + s4f32
s4f32 = l2
s5f32 = l2
s5f32 = float32(math.Floor(float64(s5f32)))
s4f32 = s4f32 - s5f32
l2 = s4f32
s5f32 = 1.4901291
s4f32 = s4f32 * s5f32
s3f32 = s3f32 - s4f32
s4f32 = 27.728024
s5f32 = 4.8425255
s6f32 = l2
s5f32 = s5f32 - s6f32
s4f32 = s4f32 / s5f32
s3f32 = s3f32 + s4f32
s4f32 = 8.388608e+06
s3f32 = s3f32 * s4f32
s4f32 = 0.5
s3f32 = s3f32 + s4f32
l2 = s3f32
s4f32 = 4.2949673e+09
if s3f32 < s4f32 {
s3i32 = 1
} else {
s3i32 = 0
}
s4f32 = l2
s5f32 = 0
if s4f32 >= s5f32 {
s4i32 = 1
} else {
s4i32 = 0
}
s3i32 = s3i32 & s4i32
if s3i32 != 0 {
s3f32 = l2
s3i32 = int32(uint32(math.Trunc(float64(s3f32))))
goto lbl4
}
s3i32 = 0
lbl4:
s3f32 = math.Float32frombits(uint32(s3i32))
s4i32 = l8
s4f32 = math.Float32frombits(uint32(s4i32))
l2 = s4f32
s5f32 = l2
s6f32 = 1
if s5f32 != s6f32 {
s5i32 = 1
} else {
s5i32 = 0
}
if s5i32 != 0 {
// s3f32 = s3f32
} else {
s3f32 = s4f32
}
s4f32 = l2
s5f32 = l2
s6f32 = 0
if s5f32 != s6f32 {
s5i32 = 1
} else {
s5i32 = 0
}
if s5i32 != 0 {
// s3f32 = s3f32
} else {
s3f32 = s4f32
}
s2f32 = s2f32 * s3f32
s3f32 = l13
s4f32 = l14
s5f32 = l2
s6f32 = l15
s5f32 = s5f32 - s6f32
s5i32 = int32(math.Float32bits(s5f32))
l0 = s5i32
s5f32 = float32(uint32(s5i32))
s6f32 = 1.1920929e-07
s5f32 = s5f32 * s6f32
s6f32 = -124.22552
s5f32 = s5f32 + s6f32
s6i32 = l0
s7i32 = 8388607
s6i32 = s6i32 & s7i32
s7i32 = 1056964608
s6i32 = s6i32 | s7i32
s6f32 = math.Float32frombits(uint32(s6i32))
l10 = s6f32
s7f32 = 1.4980303
s6f32 = s6f32 * s7f32
s5f32 = s5f32 - s6f32
s6f32 = 1.72588
s7f32 = l10
s8f32 = 0.35208872
s7f32 = s7f32 + s8f32
s6f32 = s6f32 / s7f32
s5f32 = s5f32 - s6f32
s6f32 = 0.6931472
s5f32 = s5f32 * s6f32
s4f32 = s4f32 * s5f32
s3f32 = s3f32 + s4f32
s4f32 = l2
s5f32 = 1
if s4f32 <= s5f32 {
s4i32 = 1
} else {
s4i32 = 0
}
if s4i32 != 0 {
// s2f32 = s2f32
} else {
s2f32 = s3f32
}
s2i32 = int32(math.Float32bits(s2f32))
s3i32 = l6
s4i32 = -2147483648
s3i32 = s3i32 & s4i32
s2i32 = s2i32 | s3i32
s2f32 = math.Float32frombits(uint32(s2i32))
s3f32 = l3
s4f32 = l4
s5f32 = l5
s6i32 = l1
s6i32 = *(*int32)(unsafe.Pointer(&ctx.Mem[int(s6i32+4)]))
if int(s6i32) < 0 || int(s6i32) >= len(table) {
panic("table entry out of bounds")
}
if table[s6i32].numParams == -1 {
panic("table entry is nil")
}
if table[s6i32].numParams != 6 {
panic("argument count mismatch")
}
(*(*func(*Context, int32, int32, float32, float32, float32, float32))(table[s6i32].f()))(ctx, s0i32, s1i32, s2f32, s3f32, s4f32, s5f32)
}
| f787 |
api.go | package k8s
import (
"context"
"fmt"
"net/http"
"strings"
"time"
"github.com/linkerd/linkerd2/pkg/prometheus"
tsclient "github.com/servicemeshinterface/smi-sdk-go/pkg/gen/client/split/clientset/versioned"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
apiregistration "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
// Load all the auth plugins for the cloud providers.
_ "k8s.io/client-go/plugin/pkg/client/auth"
)
var minAPIVersion = [3]int{1, 13, 0}
// KubernetesAPI provides a client for accessing a Kubernetes cluster.
// TODO: support ServiceProfile ClientSet. A prerequisite is moving the
// ServiceProfile client code from `./controller` to `./pkg` (#2751). This will
// also allow making `NewFakeClientSets` private, as KubernetesAPI will support
// all relevant k8s resources.
type KubernetesAPI struct {
*rest.Config
kubernetes.Interface
Apiextensions apiextensionsclient.Interface // for CRDs
Apiregistration apiregistration.Interface // for access to APIService
TsClient tsclient.Interface
DynamicClient dynamic.Interface
}
// NewAPI validates a Kubernetes config and returns a client for accessing the
// configured cluster.
func NewAPI(configPath, kubeContext string, impersonate string, impersonateGroup []string, timeout time.Duration) (*KubernetesAPI, error) {
config, err := GetConfig(configPath, kubeContext)
if err != nil {
return nil, fmt.Errorf("error configuring Kubernetes API client: %v", err)
}
return NewAPIForConfig(config, impersonate, impersonateGroup, timeout)
}
// NewAPIForConfig uses a Kubernetes config to construct a client for accessing
// the configured cluster
func NewAPIForConfig(config *rest.Config, impersonate string, impersonateGroup []string, timeout time.Duration) (*KubernetesAPI, error) {
// k8s' client-go doesn't support injecting context
// https://github.com/kubernetes/kubernetes/issues/46503
// but we can set the timeout manually
config.Timeout = timeout
wt := config.WrapTransport
config.WrapTransport = prometheus.ClientWithTelemetry("k8s", wt)
if impersonate != "" {
config.Impersonate = rest.ImpersonationConfig{
UserName: impersonate,
Groups: impersonateGroup,
}
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("error configuring Kubernetes API clientset: %v", err)
}
apiextensions, err := apiextensionsclient.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("error configuring Kubernetes API Extensions clientset: %v", err)
}
aggregatorClient, err := apiregistration.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("error configuring Kubernetes API server aggregator: %v", err)
}
tsClient, err := tsclient.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("error configuring Traffic Split clientset: %v", err)
}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("error configuring Kubernetes Dynamic Client: %v", err)
}
return &KubernetesAPI{
Config: config,
Interface: clientset,
Apiextensions: apiextensions,
Apiregistration: aggregatorClient,
TsClient: tsClient,
DynamicClient: dynamicClient,
}, nil
}
// NewClient returns an http.Client configured with a Transport to connect to
// the Kubernetes cluster.
func (kubeAPI *KubernetesAPI) NewClient() (*http.Client, error) {
secureTransport, err := rest.TransportFor(kubeAPI.Config)
if err != nil {
return nil, fmt.Errorf("error instantiating Kubernetes API client: %v", err)
}
return &http.Client{
Transport: secureTransport,
}, nil
}
// GetVersionInfo returns version.Info for the Kubernetes cluster.
func (kubeAPI *KubernetesAPI) GetVersionInfo() (*version.Info, error) {
return kubeAPI.Discovery().ServerVersion()
}
// CheckVersion validates whether the configured Kubernetes cluster's version is
// running a minimum Kubernetes API version.
func (kubeAPI *KubernetesAPI) CheckVersion(versionInfo *version.Info) error {
apiVersion, err := getK8sVersion(versionInfo.String())
if err != nil {
return err
}
if !isCompatibleVersion(minAPIVersion, apiVersion) {
return fmt.Errorf("Kubernetes is on version [%d.%d.%d], but version [%d.%d.%d] or more recent is required",
apiVersion[0], apiVersion[1], apiVersion[2],
minAPIVersion[0], minAPIVersion[1], minAPIVersion[2])
}
return nil
}
// NamespaceExists validates whether a given namespace exists.
func (kubeAPI *KubernetesAPI) NamespaceExists(ctx context.Context, namespace string) (bool, error) {
ns, err := kubeAPI.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
if kerrors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
return ns != nil, nil
}
// GetPodsByNamespace returns all pods in a given namespace
func (kubeAPI *KubernetesAPI) GetPodsByNamespace(ctx context.Context, namespace string) ([]corev1.Pod, error) {
podList, err := kubeAPI.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
if err != nil {
return nil, err
}
return podList.Items, nil
}
// GetReplicaSets returns all replicasets in a given namespace
func (kubeAPI *KubernetesAPI) GetReplicaSets(ctx context.Context, namespace string) ([]appsv1.ReplicaSet, error) {
replicaSetList, err := kubeAPI.AppsV1().ReplicaSets(namespace).List(ctx, metav1.ListOptions{})
if err != nil {
return nil, err
}
return replicaSetList.Items, nil
}
// GetNamespaceWithExtensionLabel gets the namespace with the LinkerdExtensionLabel label value of `value`
func (kubeAPI *KubernetesAPI) GetNamespaceWithExtensionLabel(ctx context.Context, value string) (*corev1.Namespace, error) {
namespaces, err := kubeAPI.CoreV1().Namespaces().List(ctx, metav1.ListOptions{LabelSelector: LinkerdExtensionLabel})
if err != nil {
return nil, err
}
for _, ns := range namespaces.Items {
if ns.Labels[LinkerdExtensionLabel] == value {
return &ns, err
}
}
return nil, fmt.Errorf("could not find the %s extension", value)
}
// GetPodStatus receives a pod and returns the pod status, based on `kubectl` logic.
// This logic is imported and adapted from the github.com/kubernetes/kubernetes project:
// https://github.com/kubernetes/kubernetes/blob/33a3e325f754d179b25558dee116fca1c67d353a/pkg/printers/internalversion/printers.go#L558-L640
func GetPodStatus(pod corev1.Pod) string {
reason := string(pod.Status.Phase)
if pod.Status.Reason != "" |
initializing := false
for i := range pod.Status.InitContainerStatuses {
container := pod.Status.InitContainerStatuses[i]
switch {
case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0 && container.State.Terminated.Signal == 0:
continue
case container.State.Terminated != nil:
// initialization is failed
if len(container.State.Terminated.Reason) == 0 {
if container.State.Terminated.Signal != 0 {
reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal)
} else {
reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode)
}
} else {
reason = "Init:" + container.State.Terminated.Reason
}
initializing = true
case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing":
reason = "Init:" + container.State.Waiting.Reason
initializing = true
default:
reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers))
initializing = true
}
break
}
if !initializing {
hasRunning := false
for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
container := pod.Status.ContainerStatuses[i]
if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
reason = container.State.Waiting.Reason
} else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
reason = container.State.Terminated.Reason
} else if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
if container.State.Terminated.Signal != 0 {
reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
} else {
reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
}
} else if container.Ready && container.State.Running != nil {
hasRunning = true
}
}
// change pod status back to "Running" if there is at least one container still reporting as "Running" status
if reason == "Completed" && hasRunning {
reason = "Running"
}
}
return reason
}
// GetProxyReady returns true if the pod contains a proxy that is ready
func GetProxyReady(pod corev1.Pod) bool {
for _, container := range pod.Status.ContainerStatuses {
if container.Name == ProxyContainerName {
return container.Ready
}
}
return false
}
// GetProxyVersion returns the container proxy's version, if any
func GetProxyVersion(pod corev1.Pod) string {
for _, container := range pod.Spec.Containers {
if container.Name == ProxyContainerName {
parts := strings.Split(container.Image, ":")
return parts[1]
}
}
return ""
}
// GetAddOnsConfigMap returns the data in the add-ons configmap
func GetAddOnsConfigMap(ctx context.Context, kubeAPI kubernetes.Interface, namespace string) (map[string]string, error) {
cm, err := kubeAPI.CoreV1().ConfigMaps(namespace).Get(ctx, AddOnsConfigMapName, metav1.GetOptions{})
if err != nil {
return nil, err
}
return cm.Data, nil
}
| {
reason = pod.Status.Reason
} |
List004.py | # Lista 04 - Itanu Romero - 2o. semestre
def questao01():
"""
Elabore um programa que efetue a leitura de duas strings e informe o seu conteúdo,
seguido de seu compri- mento. Indique também se as
duas strings possuem o mesmo comprimento e se são iguais ou diferentes no conteúdo.
"""
dicionario = {}
for i in range(2):
palavra = input('Digite uma palavra: ')
dicionario[i] = [palavra, len(palavra)]
print(dicionario)
if dicionario[0][0] == dicionario[1][0]:
print('Conteúdo iguais')
if dicionario[0][1] == dicionario[1][1]:
print('Comprimento iguais')
def questao02():
"""
Elabore um programa que solicite ao usuário, o seu nome e em seguida
mostre o seu nome de trás para frente utilizando somente letras maiúsculas.
"""
nome = input('Digite seu nome: ')
print(nome[::-1].upper())
def questao03():
"""
Elaborar um programa que solicite a digitação de um número
de CPF no formato xxx.xxx.xxx-xx e indique se é um número válido ou inválido
através da validação dos dígitos verificadores e dos caracteres de formatação.
"""
cpf = input("Digite seu CPF\n")
if len(cpf) == 14 and cpf[3] == "." and cpf[7] == "." and cpf[11] == "-":
print("É um CPF")
else:
print("Não é um CPF")
def questao04():
"""
Elaborar um programa que a partir da digitação de uma frase,
o programa informe quantos espaços
em branco e quantos são, e quantas vezes aparecem cada uma das vogais a, e, i, o, u.
"""
frase = input('Digite uma frase: ').lower()
vogais = ['a', 'e', 'i', 'o', 'u']
vogais_na_frase = 0
espacos_em_branco = 0
for i in frase:
if i in vogais:
vogais_na_frase += 1
if i in " ":
espacos_em_branco += 1
print(f'Numeros de vogais: {vogais_na_frase}')
print(f'Numeros de espacos em branco: {espacos_em_branco}')
def questao05():
"""
Faça um programa que leia um número de telefone,
e corrija o número no caso deste conter somente 7 dígitos,
acrescentando o ’3’ na frente.
O usuário pode informar o número com ou sem o traço separador.
"""
telefone = input('Digite um telefone: ')
traco = False
for i in telefone:
if i == '-':
traco = True
if len(telefone) == 7 or len(telefone) == 8 and traco:
telefone = '3' + telefone
print(f'Seu telefone é: {telefone}')
def questao06():
"""
Desenvolva um jogo em que o usuário tenha que adivinhar uma palavra que
será mostrada com as letras embaralhadas. O programa terá uma lista de
palavras lidas de uma lista a ser fixada inicialmente pelo programador e
escolherá uma aleatoriamente. O jogador terá uma única tentativa para adivinhar
a palavra. Ao final a palavra deve ser mostrada na tela, informando se o usuário
ganhou ou perdeu o jogo.
Observação: Refaça, possibilitando ao jogador tentar até 5 vezes.
"""
import random
animais = ['gato', 'cachorro', 'cavalo', 'jumento', 'peixe', 'zebra', 'papagaio', 'girafa', 'pomba', 'lagosta']
escolhida = random.choice(animais)
shuffled = list(escolhida)
random.shuffle(shuffled)
shuffled = "".join(shuffled)
print(f'A palavra embaralhada é {shuffled}\n')
tentativa = input('Qual a palavra embaralhada? ')
if escolhida == tentativa.lower():
print('Você acertou, parabéns')
else:
print('Você errou')
print(f'A palavra era {escolhida}')
def questao07():
"""
Elabore um programa que efetue a leitura de
cinco números inteiros, adicione-os a uma lista e mostre-a.
"""
lista = []
for i in range(5):
numero = int(input('Digite o um número: '))
lista.append(numero)
print(lista)
def questao08():
"""
Elabore um programa que efetue a leitura de quinze números inteiros,
adicione-os a uma lista e mostre-a de forma invertida, do último para o primeiro.
"""
lista = []
for i in range(15):
numero = int(input('Digite o um número: '))
lista.append(numero)
print(lista[::-1])
def questao09():
"""
Elabore um programa que efetue a leitura de quatro notas reais,
adicione-as a uma lista e mostre-as, inclusive a média aritmética,
arredondar duas casas decimais. Verifique e exiba as devidas mensagens
se o aluno está aprovado ou não, considerando que a média de aprovação
é maior ou igual a 7.0, e em prova exame, se
média aritmética entre 4.0 e menor que 7.0. E reprovado, se menor que 4.0.
"""
lista = []
soma = 0
for i in range(4):
nota = float(input('Digite sua nota: '))
soma = soma + nota
lista.append(nota)
media = round(soma / 4, 2)
print(f'Suas notas são {lista}sendo assim sua média é {media}')
if media >= 7:
print('Você está aprovado')
elif 4 <= media < 7:
print('Pegou exame')
else:
print('Reprovou')
def questao10():
"""
Faça um programa que leia uma lista com dez caracteres,
e diga quantas consoantes foram lidas. Imprima as consoantes.
"""
vogais = ['a', 'e', 'i', 'o', 'u']
lista = []
j = 0
for i in range(10):
caracter = input('Digite um caracter: ')
caracter = caracter.lower()
if caracter in vogais:
pass
else:
lista.append(caracter)
j += 1
print(f'Foram inseridas {j} consoantes, são elas {lista}')
def questao11():
"""
Faça um programa que leia 15 números inteiros e armazene-os em uma lista NUMEROS.
Armazene os números
pares na lista PAR e os números ímpares na lista IMPAR. Imprima os três vetores.
"""
numeros = []
par = []
impar = []
for i in range(10):
numero = int(input('Digite um número: '))
numeros.append(numero)
if numero % 2 == 0:
par.append(numero)
else:
impar.append(numero)
print(f'Os números digitados foram {numeros}\n'
f'Dentre eles esses são pares {par} e estes são ímpares {impar}')
def questao12():
"""
Elabore um programa que efetue a leitura de quatro notas reais de10 alunos,
calcule e armazene em uma lista,
a média de cada aluno, imprima o número de alunos com média maior ou igual a 7.0.
"""
lista = []
k = 0
for i in range(1, 11):
soma = 0
for j in range(1, 5):
nota = float(input(f'Digite a {j}ª nota do aluno "{i}\n'))
soma = soma + nota
media = soma / 4
lista.append(media)
if media >= 7:
k += 1
print(f'A média dos 10 alunos eh {lista} sendo {k} acima da média')
def questao | """
Faça um programa que carregue uma lista com os modelos
de cinco carros (exemplo de modelos: FUSCA, GOL, VECTRA etc).
Carregue uma outra lista com o consumo desses carros, isto é,
quantos quilômetros cada um desses carros faz com um litro de combustível.
Calcule e mostre:
O modelo do carro mais econômico;
Quantos litros de combustível cada um dos carros
cadastrados consome para percorrer uma distância de
1000 quilômetros e quanto isto custará, considerando
um que a gasolina custe 2,25 o litro.
Abaixo segue uma tela de exemplo. O disposição das
informações deve ser o mais próxima possível ao exemplo.
Os dados são fictícios e podem mudar a cada execução do programa.
Relatório Final
1 - SUV - 10.0 - 100.0 litros - R 399.0
2 - IDEA - 12.0 - 83.3 litros - R 332.5
3 - GOL - 10.0 - 100.0 litros - R 399.0
4 - BMW - 20.0 - 50.0 litros - R 199.5
5 - UNO - 2.0 - 500.0 litros - R 1995.0
O menor consumo é do BMW.
"""
carros = ['Fusca', 'Gol', 'Vectra', 'Uno', 'Amarok']
consumo = [20.0, 18.0, 9.5, 15.0, 5.7]
economico = 9999
j = 0
for i in consumo:
print(f'{j + 1}-{carros[j]} - {i} - {round(1000 / i, 1)} litros - R${round(1000 / i * 2.25, 1)}')
if i < economico:
economico = i
carro = j
j += 1
print(f'O menor consumo é do {carros[carro]}')
# Main Program
# Veja o enunciado:
help(questao13())
# Chame determinadas funcoes atraves de:
questao13() | 13():
|
mod.rs | // MIT/Apache2 License
#![cfg(windows)]
mod event;
use crate::{
display::{Display, DrawHandler, EventHandler},
mutex::Mutex,
screen::{Screen, ScreenIter},
window::{Window, WindowProps},
Dimensions,
};
use chalkboard::yaww::{YawwGdiSurface, YawwGdiSurfaceResidual};
use dashmap::DashMap;
use nanorand::RNG;
use once_cell::sync::OnceCell;
use std::{
borrow::Cow,
cell::RefCell,
collections::hash_map::{Entry, HashMap},
ffi::{CStr, CString},
iter,
num::NonZeroUsize,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
use thread_safe::ThreadSafe;
use yaww::{
brush::DEFAULT_BRUSH,
dc::Dc,
monitor::{MonitorFunctions, MonitorInfo},
window::{ExtendedWindowStyle, ShowWindowCommand, Window as YWindow, WindowStyle},
window_class::ClassStyle,
GuiThread, GuiThreadHandle, Rectangle, SendsDirective, WcFunctions, WindowFunctions,
};
/// Wrapper around a `yaww` handle.
#[derive(Clone)]
pub struct YawwDisplay<'evh> {
handle: GuiThreadHandle<'evh>,
associated: Arc<Data<'evh>>,
}
/// Internal data that needs to be passed around.
struct Data<'evh> {
// hash map containing window properties
window_properties: DashMap<YWindow, WindowProps>,
// we keep a reference to the GUI thread
gui_thread: Mutex<Option<ThreadSafe<GuiThread<'evh>>>>,
// cached monitors
monitors: OnceCell<HashMap<Screen, MonitorInfo>>,
// the window class
window_class: OnceCell<Box<CStr>>,
// cached DC/cached window
// Note: This HashMap is used in two places: in the draw() function, and in the WM_PAINT handler. Although
// the draw() function may be called from any thread, it is often called in the wndproc, where
// WM_PAINT is also called. Therefore I'm fine not using a DashMap in this case since the case where
// any contention actually happens in extremely rare and will probably lead to an error anyways
dcs: Mutex<HashMap<YWindow, DcOrMaybeResidual>>,
}
#[derive(Default)]
struct DcOrMaybeResidual {
dc: Option<Dc>,
residual: Option<Residual>,
}
enum Residual {
Gdi(YawwGdiSurfaceResidual),
}
impl<'evh> YawwDisplay<'evh> {
#[inline]
pub fn new() -> crate::Result<Self> {
let gt = match GuiThread::try_new() {
Ok(gt) => gt,
Err(yaww::Error::AlreadyAYawwThread) => {
return Err(crate::Error::StaticMsg("Thread already existed"))
}
Err(e) => return Err(e.into()),
};
log::trace!("Created GuiThread");
let handle = gt.handle();
log::trace!("Created GuiThreadHandle");
Ok(Self {
handle,
associated: Arc::new(Data {
window_properties: DashMap::new(),
gui_thread: Mutex::new(Some(ThreadSafe::new(gt))),
monitors: OnceCell::new(),
window_class: OnceCell::new(),
dcs: Mutex::new(HashMap::new()),
}),
})
}
#[inline]
pub(crate) fn store_dc(&self, window: YWindow, dc: Dc) {
let mut dcs = self.associated.dcs.lock();
match dcs.entry(window) {
Entry::Occupied(mut o) => {
o.get_mut().dc = Some(dc);
}
Entry::Vacant(v) => {
v.insert(DcOrMaybeResidual {
dc: Some(dc),
residual: None,
});
}
}
}
#[inline]
fn monitors(&self) -> crate::Result<&HashMap<Screen, MonitorInfo>> {
let handle = self.handle.clone();
self.associated.monitors.get_or_try_init(move || {
log::trace!("Initialized monitors");
let monitors = handle.monitors()?.wait()?;
crate::Result::Ok(
monitors
.into_iter()
.map(|s| (Screen::from_raw(s.monitor.into_raw().get()), s))
.collect(),
)
})
}
#[inline]
fn is_monitor(&self, window: Window) -> crate::Result<bool> {
let screen = Screen::from_raw(window.into_raw().get());
Ok(self.monitors()?.contains_key(&screen))
}
#[inline]
fn check_for_monitor(&self, window: Window) -> crate::Result<Option<(Screen, &MonitorInfo)>> {
let screen = Screen::from_raw(window.into_raw().get());
match self.monitors()?.get(&screen) {
Some(mon) => Ok(Some((screen, mon))),
None => Ok(None),
}
}
#[inline]
fn common_window_class(&self) -> crate::Result<CString> {
let handle = self.handle.clone();
self.associated
.window_class
.get_or_try_init(move || {
let mut class_name = b"GuiToolsWndClass".to_vec();
let mut rng = nanorand::tls_rng();
class_name.extend(
iter::repeat_with(move || rng.generate_range(0x48, 0x57))
.take(nanorand::tls_rng().generate_range(1, 4)),
);
let class_name = CString::new(class_name)
.expect("Class name shouldn't have a 0 byte")
.into_boxed_c_str();
handle
.register_class(
class_name.clone().into_c_string(),
None,
ClassStyle::empty(),
None,
None,
None,
Some(DEFAULT_BRUSH),
)?
.wait()?;
crate::Result::Ok(class_name)
})
.map(|c| c.clone().into_c_string())
}
#[inline]
pub(crate) fn create_window_custom_class(
&self,
x: i32,
y: i32,
width: u32,
height: u32,
parent: Window,
mut props: WindowProps,
base_class: Option<Cow<'static, CStr>>,
) -> crate::Result<Window> {
let (x, y, parent) = match self.check_for_monitor(parent)? {
None => (x, y, Some(YWindow::from_raw(parent.into_raw()))),
Some((scr, mon)) => (mon.x as i32 + x, mon.y as i32 + y, None),
};
let title = props.title.take();
let title = CString::new(match title {
Some(title) => title.into_bytes(),
None => vec![],
})
.map_err(|_| crate::Error::NotCompatible)?;
let window = self
.handle
.create_window(
self.common_window_class()?,
base_class,
Some(Cow::Owned(title)),
WindowStyle::OVERLAPPED_WINDOW,
ExtendedWindowStyle::CLIENT_EDGE,
x,
y,
width as _,
height as _,
parent,
None,
)?
.wait()?;
self.associated.window_properties.insert(window, props);
Ok(Window::from_raw(window.into_raw()))
}
}
impl<'evh> Display<'evh> for YawwDisplay<'evh> {
#[inline]
fn screens(&mut self) -> crate::Result<ScreenIter<'_>> {
let screens: Vec<usize> = self.monitors()?.keys().map(|s| s.into_raw()).collect();
Ok(ScreenIter::from(screens))
}
#[inline]
fn default_screen(&mut self) -> crate::Result<Screen> {
let scr = self.handle.default_monitor()?.wait()?;
Ok(Screen::from_raw(scr.into_raw().get()))
}
#[inline]
fn screen_dimensions(&mut self, screen: Screen) -> crate::Result<(u32, u32)> {
match self.monitors()?.get(&screen) {
Some(monitor) => Ok((monitor.width as _, monitor.height as _)),
None => Err(crate::Error::NoScreen(screen.into_raw())),
}
}
#[inline]
fn toplevel_window(&mut self, screen: Screen) -> crate::Result<Window> {
// we represent Windows as "either a Window or a Monitor", so this is a simple
// translation
Ok(Window::from_raw(
NonZeroUsize::new(screen.into_raw()).expect("Screen should not be zero"),
))
}
#[inline]
fn create_window(
&mut self,
x: i32,
y: i32,
width: u32,
height: u32,
parent: Window,
props: WindowProps,
) -> crate::Result<Window> {
self.create_window_custom_class(x, y, width, height, parent, props, None)
}
#[inline]
fn delete_window(&mut self, window: Window) -> crate::Result {
let window = YWindow::from_raw(window.into_raw());
window.close(&self.handle)?.wait()?;
self.associated.window_properties.remove(&window);
Ok(())
}
#[inline]
fn set_window_visibility(&mut self, window: Window, visible: bool) -> crate::Result {
if self.is_monitor(window)? {
return Err(crate::Error::CannotOnMonitor);
}
let window = YWindow::from_raw(window.into_raw());
let _ = window
.show(
&self.handle,
if visible {
ShowWindowCommand::SHOW
} else {
ShowWindowCommand::HIDE
},
)?
.wait();
Ok(())
}
#[inline]
fn window_dimensions(&mut self, window: Window) -> crate::Result<Dimensions> {
if let Some((_, mon_info)) = self.check_for_monitor(window)? {
return Ok(Dimensions {
x: mon_info.x as _,
y: mon_info.y as _,
width: mon_info.width as _,
height: mon_info.height as _,
});
}
let Rectangle {
left,
top,
right,
bottom,
} = cvt_window(window).get_client_rect(&self.handle)?.wait()?;
Ok(Dimensions {
x: left as _,
y: top as _,
width: (right - left) as _,
height: (bottom - top) as _,
})
}
// window_coordinates and window_size are implemented in terms of the above function
#[inline]
fn window_set_dimensions(
&mut self,
window: Window,
x: i32,
y: i32,
width: u32,
height: u32,
) -> crate::Result {
if self.is_monitor(window)? {
return Err(crate::Error::CannotOnMonitor);
}
let window = cvt_window(window);
window
.move_resize_window(&self.handle, x, y, width as _, height as _, true)?
.wait()?;
Ok(())
}
#[inline]
fn window_set_coordinates(&mut self, window: Window, x: i32, y: i32) -> crate::Result {
if self.is_monitor(window)? {
return Err(crate::Error::CannotOnMonitor);
}
let window = cvt_window(window);
window.move_window(&self.handle, x, y, true)?.wait()?;
Ok(())
}
#[inline]
fn window_set_size(&mut self, window: Window, width: u32, height: u32) -> crate::Result {
if self.is_monitor(window)? {
return Err(crate::Error::CannotOnMonitor);
}
let window = cvt_window(window);
window
.resize_window(&self.handle, width as _, height as _, true)?
.wait()?;
Ok(())
}
#[inline]
fn draw_with_boxed_draw_handler(
&mut self,
window: Window,
handler: DrawHandler<'_>,
) -> crate::Result {
if self.is_monitor(window)? {
unimplemented!()
}
let window = cvt_window(window);
let mut dcs = self.associated.dcs.lock();
let entry = match dcs.get_mut(&window) {
Some(entry) => entry,
None => return Err(crate::Error::NoValidDraw(window.into_raw())),
};
let dc = entry
.dc
.take()
.ok_or(crate::Error::NoValidDraw(window.into_raw()))?;
// TODO: also handle direct2d and gl, when the time comes
let mut surface = match entry.residual.take() {
Some(Residual::Gdi(residual)) => {
YawwGdiSurface::from_residual(&self.handle, dc, residual)
}
None => YawwGdiSurface::new(&self.handle, dc),
};
let res = handler(&mut surface);
let residual = match surface {
surface => Residual::Gdi(surface.into_residual()),
};
entry.residual = Some(residual);
res
}
#[inline]
fn window_parent(&mut self, window: Window) -> crate::Result<Option<Window>> {
let window = cvt_window(window);
let parent = window.get_parent(&self.handle)?.wait();
Ok(parent.map(cvt_window_r))
}
#[inline]
fn run_with_boxed_event_handler(&mut self, mut handler: EventHandler<'evh>) -> crate::Result {
log::trace!("Running yaww main loop");
let gt = self
.associated
.gui_thread
.lock()
.take()
.ok_or(crate::Error::AlreadyRanMainLoop)?
.try_into_inner()
.map_err(|_| {
crate::Error::StaticMsg("Main loop can only be ran in the originating thread")
})?;
log::trace!("Loaded main GuiThread");
let mut this = self.clone();
let event_handler: Box<
dyn FnMut(&yaww::PinnedGuiThreadHandle<'evh>, yaww::Event) -> yaww::Result
+ Send
+ 'evh,
> = Box::new(move |_, ev| {
let ev = match event::cvt_event(&this, ev) {
Ok(Some(ev)) => ev,
Ok(None) => return Ok(()),
Err(e) => return Err(yaww::Error::Dynamic(Arc::new(e))),
};
match handler(&mut this, ev) {
Ok(()) => Ok(()),
Err(e) => Err(yaww::Error::Dynamic(Arc::new(e))),
}
});
gt.set_event_handler(event_handler);
match gt.main_loop() {
Ok(()) => Ok(()),
Err(e) => match e {
e => Err(e.into()),
},
}
}
}
#[inline]
fn cvt_window(window: Window) -> YWindow {
YWindow::from_raw(window.into_raw())
}
#[inline]
pub(crate) fn cvt_window_r(window: YWindow) -> Window | {
Window::from_raw(window.into_raw())
} |
|
docker_cli_inspect_test.go | package main
import (
"encoding/json"
"fmt"
"os/exec"
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/integration/checker"
"github.com/docker/docker/runconfig"
"github.com/go-check/check"
)
func (s *DockerSuite) TestInspectImage(c *check.C) {
testRequires(c, DaemonIsLinux)
imageTest := "emptyfs"
imageTestID := "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"
id, err := inspectField(imageTest, "Id")
c.Assert(err, check.IsNil)
if id != imageTestID {
c.Fatalf("Expected id: %s for image: %s but received id: %s", imageTestID, imageTest, id)
}
}
func (s *DockerSuite) TestInspectInt64(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-d", "-m=300M", "--name", "inspectTest", "busybox", "true")
inspectOut, err := inspectField("inspectTest", "HostConfig.Memory")
c.Assert(err, check.IsNil)
if inspectOut != "314572800" {
c.Fatalf("inspect got wrong value, got: %q, expected: 314572800", inspectOut)
}
}
func (s *DockerSuite) TestInspectDefault(c *check.C) {
testRequires(c, DaemonIsLinux)
//Both the container and image are named busybox. docker inspect will fetch the container JSON.
//If the container JSON is not available, it will go for the image JSON.
dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true")
dockerCmd(c, "inspect", "busybox")
}
func (s *DockerSuite) TestInspectStatus(c *check.C) {
defer unpauseAllContainers()
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
out = strings.TrimSpace(out)
inspectOut, err := inspectField(out, "State.Status")
c.Assert(err, check.IsNil)
if inspectOut != "running" {
c.Fatalf("inspect got wrong status, got: %q, expected: running", inspectOut)
}
dockerCmd(c, "pause", out)
inspectOut, err = inspectField(out, "State.Status")
c.Assert(err, check.IsNil)
if inspectOut != "paused" {
c.Fatalf("inspect got wrong status, got: %q, expected: paused", inspectOut)
}
dockerCmd(c, "unpause", out)
inspectOut, err = inspectField(out, "State.Status")
c.Assert(err, check.IsNil)
if inspectOut != "running" {
c.Fatalf("inspect got wrong status, got: %q, expected: running", inspectOut)
}
dockerCmd(c, "stop", out)
inspectOut, err = inspectField(out, "State.Status")
c.Assert(err, check.IsNil)
if inspectOut != "exited" {
c.Fatalf("inspect got wrong status, got: %q, expected: exited", inspectOut)
}
}
func (s *DockerSuite) TestInspectTypeFlagContainer(c *check.C) {
testRequires(c, DaemonIsLinux)
//Both the container and image are named busybox. docker inspect will fetch container
//JSON State.Running field. If the field is true, it's a container.
dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "top")
formatStr := "--format='{{.State.Running}}'"
out, exitCode, err := dockerCmdWithError("inspect", "--type=container", formatStr, "busybox")
if exitCode != 0 || err != nil {
c.Fatalf("failed to inspect container: %s, %v", out, err)
}
if out != "true\n" {
c.Fatal("not a container JSON")
}
}
func (s *DockerSuite) TestInspectTypeFlagWithNoContainer(c *check.C) {
testRequires(c, DaemonIsLinux)
//Run this test on an image named busybox. docker inspect will try to fetch container
//JSON. Since there is no container named busybox and --type=container, docker inspect will
//not try to get the image JSON. It will throw an error.
dockerCmd(c, "run", "-d", "busybox", "true")
_, exitCode, err := dockerCmdWithError("inspect", "--type=container", "busybox")
if exitCode == 0 || err == nil {
c.Fatalf("docker inspect should have failed, as there is no container named busybox")
}
}
func (s *DockerSuite) TestInspectTypeFlagWithImage(c *check.C) {
testRequires(c, DaemonIsLinux)
//Both the container and image are named busybox. docker inspect will fetch image
//JSON as --type=image. if there is no image with name busybox, docker inspect
//will throw an error.
dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true")
out, exitCode, err := dockerCmdWithError("inspect", "--type=image", "busybox")
if exitCode != 0 || err != nil {
c.Fatalf("failed to inspect image: %s, %v", out, err)
}
if strings.Contains(out, "State") {
c.Fatal("not an image JSON")
}
}
func (s *DockerSuite) TestInspectTypeFlagWithInvalidValue(c *check.C) {
testRequires(c, DaemonIsLinux)
//Both the container and image are named busybox. docker inspect will fail
//as --type=foobar is not a valid value for the flag.
dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true")
out, exitCode, err := dockerCmdWithError("inspect", "--type=foobar", "busybox")
if exitCode != 0 || err != nil {
if !strings.Contains(out, "not a valid value for --type") {
c.Fatalf("failed to inspect image: %s, %v", out, err)
}
}
}
func (s *DockerSuite) TestInspectImageFilterInt(c *check.C) {
testRequires(c, DaemonIsLinux)
imageTest := "emptyfs"
out, err := inspectField(imageTest, "Size")
c.Assert(err, check.IsNil)
size, err := strconv.Atoi(out)
if err != nil {
c.Fatalf("failed to inspect size of the image: %s, %v", out, err)
}
//now see if the size turns out to be the same
formatStr := fmt.Sprintf("--format='{{eq .Size %d}}'", size)
out, exitCode, err := dockerCmdWithError("inspect", formatStr, imageTest)
if exitCode != 0 || err != nil {
c.Fatalf("failed to inspect image: %s, %v", out, err)
}
if result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")); err != nil || !result {
c.Fatalf("Expected size: %d for image: %s but received size: %s", size, imageTest, strings.TrimSuffix(out, "\n"))
}
}
func (s *DockerSuite) TestInspectContainerFilterInt(c *check.C) {
testRequires(c, DaemonIsLinux)
runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat")
runCmd.Stdin = strings.NewReader("blahblah")
out, _, _, err := runCommandWithStdoutStderr(runCmd)
if err != nil {
c.Fatalf("failed to run container: %v, output: %q", err, out)
}
id := strings.TrimSpace(out)
out, err = inspectField(id, "State.ExitCode")
c.Assert(err, check.IsNil)
exitCode, err := strconv.Atoi(out)
if err != nil {
c.Fatalf("failed to inspect exitcode of the container: %s, %v", out, err)
}
//now get the exit code to verify
formatStr := fmt.Sprintf("--format='{{eq .State.ExitCode %d}}'", exitCode)
out, _ = dockerCmd(c, "inspect", formatStr, id)
if result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")); err != nil || !result {
c.Fatalf("Expected exitcode: %d for container: %s", exitCode, id)
}
}
func (s *DockerSuite) TestInspectImageGraphDriver(c *check.C) {
testRequires(c, DaemonIsLinux)
imageTest := "emptyfs"
name, err := inspectField(imageTest, "GraphDriver.Name")
c.Assert(err, check.IsNil)
if name != "devicemapper" && name != "overlay" && name != "vfs" && name != "zfs" && name != "btrfs" && name != "aufs" {
c.Fatalf("%v is not a valid graph driver name", name)
}
if name != "devicemapper" {
return
}
deviceID, err := inspectField(imageTest, "GraphDriver.Data.DeviceId")
c.Assert(err, check.IsNil)
_, err = strconv.Atoi(deviceID)
if err != nil {
c.Fatalf("failed to inspect DeviceId of the image: %s, %v", deviceID, err)
}
deviceSize, err := inspectField(imageTest, "GraphDriver.Data.DeviceSize")
c.Assert(err, check.IsNil)
_, err = strconv.ParseUint(deviceSize, 10, 64) | if err != nil {
c.Fatalf("failed to inspect DeviceSize of the image: %s, %v", deviceSize, err)
}
}
func (s *DockerSuite) TestInspectContainerGraphDriver(c *check.C) {
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-d", "busybox", "true")
out = strings.TrimSpace(out)
name, err := inspectField(out, "GraphDriver.Name")
c.Assert(err, check.IsNil)
if name != "devicemapper" && name != "overlay" && name != "vfs" && name != "zfs" && name != "btrfs" && name != "aufs" {
c.Fatalf("%v is not a valid graph driver name", name)
}
if name != "devicemapper" {
return
}
deviceID, err := inspectField(out, "GraphDriver.Data.DeviceId")
c.Assert(err, check.IsNil)
_, err = strconv.Atoi(deviceID)
if err != nil {
c.Fatalf("failed to inspect DeviceId of the image: %s, %v", deviceID, err)
}
deviceSize, err := inspectField(out, "GraphDriver.Data.DeviceSize")
c.Assert(err, check.IsNil)
_, err = strconv.ParseUint(deviceSize, 10, 64)
if err != nil {
c.Fatalf("failed to inspect DeviceSize of the image: %s, %v", deviceSize, err)
}
}
func (s *DockerSuite) TestInspectBindMountPoint(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-d", "--name", "test", "-v", "/data:/data:ro,z", "busybox", "cat")
vol, err := inspectFieldJSON("test", "Mounts")
c.Assert(err, check.IsNil)
var mp []types.MountPoint
err = unmarshalJSON([]byte(vol), &mp)
c.Assert(err, check.IsNil)
if len(mp) != 1 {
c.Fatalf("Expected 1 mount point, was %v\n", len(mp))
}
m := mp[0]
if m.Name != "" {
c.Fatal("Expected name to be empty")
}
if m.Driver != "" {
c.Fatal("Expected driver to be empty")
}
if m.Source != "/data" {
c.Fatalf("Expected source /data, was %s\n", m.Source)
}
if m.Destination != "/data" {
c.Fatalf("Expected destination /data, was %s\n", m.Destination)
}
if m.Mode != "ro,z" {
c.Fatalf("Expected mode `ro,z`, was %s\n", m.Mode)
}
if m.RW != false {
c.Fatalf("Expected rw to be false")
}
}
// #14947
func (s *DockerSuite) TestInspectTimesAsRFC3339Nano(c *check.C) {
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-d", "busybox", "true")
id := strings.TrimSpace(out)
startedAt, err := inspectField(id, "State.StartedAt")
c.Assert(err, check.IsNil)
finishedAt, err := inspectField(id, "State.FinishedAt")
c.Assert(err, check.IsNil)
created, err := inspectField(id, "Created")
c.Assert(err, check.IsNil)
_, err = time.Parse(time.RFC3339Nano, startedAt)
c.Assert(err, check.IsNil)
_, err = time.Parse(time.RFC3339Nano, finishedAt)
c.Assert(err, check.IsNil)
_, err = time.Parse(time.RFC3339Nano, created)
c.Assert(err, check.IsNil)
created, err = inspectField("busybox", "Created")
c.Assert(err, check.IsNil)
_, err = time.Parse(time.RFC3339Nano, created)
c.Assert(err, check.IsNil)
}
// #15633
func (s *DockerSuite) TestInspectLogConfigNoType(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerCmd(c, "create", "--name=test", "--log-opt", "max-file=42", "busybox")
var logConfig runconfig.LogConfig
out, err := inspectFieldJSON("test", "HostConfig.LogConfig")
c.Assert(err, check.IsNil)
err = json.NewDecoder(strings.NewReader(out)).Decode(&logConfig)
c.Assert(err, check.IsNil)
c.Assert(logConfig.Type, check.Equals, "json-file")
c.Assert(logConfig.Config["max-file"], check.Equals, "42", check.Commentf("%v", logConfig))
}
func (s *DockerSuite) TestInspectNoSizeFlagContainer(c *check.C) {
//Both the container and image are named busybox. docker inspect will fetch container
//JSON SizeRw and SizeRootFs field. If there is no flag --size/-s, there are no size fields.
dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "top")
formatStr := "--format='{{.SizeRw}},{{.SizeRootFs}}'"
out, _ := dockerCmd(c, "inspect", "--type=container", formatStr, "busybox")
c.Assert(strings.TrimSpace(out), check.Equals, "<nil>,<nil>", check.Commentf("Exepcted not to display size info: %s", out))
}
func (s *DockerSuite) TestInspectSizeFlagContainer(c *check.C) {
dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "top")
formatStr := "--format='{{.SizeRw}},{{.SizeRootFs}}'"
out, _ := dockerCmd(c, "inspect", "-s", "--type=container", formatStr, "busybox")
sz := strings.Split(out, ",")
c.Assert(strings.TrimSpace(sz[0]), check.Not(check.Equals), "<nil>")
c.Assert(strings.TrimSpace(sz[1]), check.Not(check.Equals), "<nil>")
}
func (s *DockerSuite) TestInspectSizeFlagImage(c *check.C) {
dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "top")
formatStr := "--format='{{.SizeRw}},{{.SizeRootFs}}'"
out, _, err := dockerCmdWithError("inspect", "-s", "--type=image", formatStr, "busybox")
// Template error rather than <no value>
// This is a more correct behavior because images don't have sizes associated.
c.Assert(err, check.Not(check.IsNil))
c.Assert(out, checker.Contains, "Template parsing error")
}
func (s *DockerSuite) TestInspectTempateError(c *check.C) {
//Both the container and image are named busybox. docker inspect will fetch container
//JSON State.Running field. If the field is true, it's a container.
dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "top")
out, _, err := dockerCmdWithError("inspect", "--type=container", "--format='Format container: {{.ThisDoesNotExist}}'", "busybox")
c.Assert(err, check.Not(check.IsNil))
c.Assert(out, checker.Contains, "Template parsing error")
}
func (s *DockerSuite) TestInspectJSONFields(c *check.C) {
dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "top")
out, _, err := dockerCmdWithError("inspect", "--type=container", "--format='{{.HostConfig.Dns}}'", "busybox")
c.Assert(err, check.IsNil)
c.Assert(out, checker.Equals, "[]\n")
} | |
otp.rs | use super::{Context, Handle};
use anyhow::Result;
use clap::{ArgEnum, Parser};
use otp::{Generator, HmacSha1, HOTP, TOTP};
/// Generates a two-factor token which is valid for
/// only one login session or transaction.
#[derive(Parser, Debug)]
pub struct | {
/// Specifies the token to generate.
#[clap(long, short)]
token: Option<String>,
/// Specifies the key of the token stored in the store.
#[clap(long, short, conflicts_with = "token")]
key: Option<String>,
/// If --clip or -c is specified, do not print the password but instead copy
/// it to the clipboard.
#[cfg(feature = "clipcopy")]
#[clap(long, short)]
clip: bool,
/// Specifies the algorithm to hash the token.
#[clap(long, arg_enum, default_value = "t")]
algo: Algo,
}
#[derive(ArgEnum, Debug, Clone, PartialEq, PartialOrd)]
pub enum Algo {
#[clap(alias = "h")]
Hotp,
#[clap(alias = "t")]
Totp,
}
impl Handle for Otp {
fn invoke(&self, ctx: &Context) -> Result<()> {
let token = if let Some(k) = &self.key {
String::from_utf8(ctx.decrypt_file(&k)?.unsecure_ref().to_vec())?
} else if let Some(t) = &self.token {
t.to_owned()
} else {
return Err(anyhow::anyhow!("token or key MUST specified"));
};
let code = match self.algo {
Algo::Hotp => HOTP::<HmacSha1>::new()
.with_base32_secret(token)
.generate()?,
Algo::Totp => TOTP::<HmacSha1>::new()
.with_base32_secret(token)
.generate()?,
};
#[cfg(feature = "clipcopy")]
if self.clip {
use clipboard::{ClipboardContext, ClipboardProvider};
ClipboardContext::new()
.and_then(|mut cp| cp.set_contents(code))
.map_err(|e| anyhow::anyhow!("{}", e))?;
} else {
println!("{}", &code);
}
#[cfg(not(feature = "clipcopy"))]
println!("{}", &code);
Ok(())
}
}
| Otp |
serialization_type.go | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dotnet // import "github.com/rati3l/opentelemetry-collector-contrib/receiver/dotnetdiagnosticsreceiver/dotnet"
import (
"github.com/rati3l/opentelemetry-collector-contrib/receiver/dotnetdiagnosticsreceiver/network"
) |
// serializationType is parsed at the beginning of each block. The resulting
// type name determines which parser should handle the remaining bytes in the
// block.
type serializationType struct {
version int32
minReaderVersion int32
name string
}
func parseSerializationType(r network.MultiReader) (t serializationType, err error) {
err = beginPrivateObject(r)
if err != nil {
return
}
const tagNullReference = 1
err = r.AssertNextByteEquals(tagNullReference)
if err != nil {
return
}
err = r.Read(&t.version)
if err != nil {
return
}
err = r.Read(&t.minReaderVersion)
if err != nil {
return
}
var strlen int32
err = r.Read(&strlen)
if err != nil {
return
}
t.name, err = r.ReadASCII(int(strlen))
if err != nil {
return
}
err = endObject(r)
return
} | |
guest_reset_task.go | // Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tasks
import (
"context"
"yunion.io/x/jsonutils"
api "yunion.io/x/onecloud/pkg/apis/compute"
"yunion.io/x/onecloud/pkg/cloudcommon/db"
"yunion.io/x/onecloud/pkg/cloudcommon/db/taskman"
"yunion.io/x/onecloud/pkg/compute/models"
)
func init() |
type GuestSoftResetTask struct {
SGuestBaseTask
}
func (self *GuestSoftResetTask) OnInit(ctx context.Context, obj db.IStandaloneModel, data jsonutils.JSONObject) {
guest := obj.(*models.SGuest)
err := guest.GetDriver().RequestSoftReset(ctx, guest, self)
if err == nil {
self.SetStageComplete(ctx, nil)
} else {
self.SetStageFailed(ctx, jsonutils.NewString(err.Error()))
}
}
type GuestHardResetTask struct {
SGuestBaseTask
}
func (self *GuestHardResetTask) OnInit(ctx context.Context, obj db.IStandaloneModel, data jsonutils.JSONObject) {
guest := obj.(*models.SGuest)
self.StopServer(ctx, guest)
}
func (self *GuestHardResetTask) StopServer(ctx context.Context, guest *models.SGuest) {
guest.SetStatus(self.UserCred, api.VM_STOPPING, "")
self.SetStage("OnServerStopComplete", nil)
guest.StartGuestStopTask(ctx, self.UserCred, false, false, self.GetTaskId())
// logclient.AddActionLogWith(guest, logclient.ACT_VM_RESTART, `{"is_force": true}`, self.UserCred, true)
}
func (self *GuestHardResetTask) OnServerStopComplete(ctx context.Context, guest *models.SGuest, data jsonutils.JSONObject) {
self.StartServer(ctx, guest)
}
func (self *GuestHardResetTask) StartServer(ctx context.Context, guest *models.SGuest) {
self.SetStage("OnServerStartComplete", nil)
guest.StartGueststartTask(ctx, self.UserCred, nil, self.GetTaskId())
}
func (self *GuestHardResetTask) OnServerStartComplete(ctx context.Context, guest *models.SGuest, data jsonutils.JSONObject) {
self.SetStageComplete(ctx, nil)
}
type GuestRestartTask struct {
GuestHardResetTask
}
func (self *GuestRestartTask) StopServer(ctx context.Context, guest *models.SGuest) {
self.SetStage("OnServerStopComplete", nil)
isForce := jsonutils.QueryBoolean(self.Params, "is_force", false)
guest.StartGuestStopTask(ctx, self.UserCred, isForce, false, self.GetTaskId())
// logclient.AddActionLog(guest, logclient.ACT_VM_RESTART, `{"is_force": false}`, self.UserCred, true)
}
| {
taskman.RegisterTask(GuestSoftResetTask{})
taskman.RegisterTask(GuestHardResetTask{})
taskman.RegisterTask(GuestRestartTask{})
} |
obj.py | from pyadlml.dataset._representations.raw import create_raw
from pyadlml.dataset._representations.changepoint import create_changepoint
from pyadlml.dataset.activities import check_activities
class Data():
def __init__(self, activities, devices, activity_list, device_list):
#assert check_activities(activities)
#assert check_devices(devices)
self.df_activities = activities
self.df_devices = devices
# list of activities and devices
self.lst_activities = activity_list
self.lst_devices = device_list
def create_cp(self, t_res):
raise NotImplementedError
def create_raw(self, t_res=None, idle=False):
|
def create_lastfired(self):
raise NotImplementedError | self.df_raw = create_raw(self.df_devices, self.df_activities, t_res) |
save-and-load.js | // Functions to save and load the map
"use strict";
// download map as SVG
async function saveSVG() {
TIME && console.time("saveSVG");
const url = await getMapURL("svg");
const link = document.createElement("a");
link.download = getFileName() + ".svg";
link.href = url;
link.click();
tip(`${link.download} is saved. Open "Downloads" screen (crtl + J) to check. You can set image scale in options`, true, "success", 5000);
TIME && console.timeEnd("saveSVG");
}
// download map as PNG
async function savePNG() {
TIME && console.time("savePNG");
const url = await getMapURL("png");
const link = document.createElement("a");
const canvas = document.createElement("canvas");
const ctx = canvas.getContext("2d");
canvas.width = svgWidth * pngResolutionInput.value;
canvas.height = svgHeight * pngResolutionInput.value;
const img = new Image();
img.src = url;
img.onload = function() {
ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
link.download = getFileName() + ".png";
canvas.toBlob(function(blob) {
link.href = window.URL.createObjectURL(blob);
link.click();
window.setTimeout(function() {
canvas.remove();
window.URL.revokeObjectURL(link.href);
tip(`${link.download} is saved. Open "Downloads" screen (crtl + J) to check. You can set image scale in options`, true, "success", 5000);
}, 1000);
});
}
TIME && console.timeEnd("savePNG");
}
// download map as JPEG
async function saveJPEG() {
TIME && console.time("saveJPEG");
const url = await getMapURL("png");
const canvas = document.createElement("canvas");
canvas.width = svgWidth * pngResolutionInput.value;
canvas.height = svgHeight * pngResolutionInput.value;
const img = new Image();
img.src = url;
img.onload = async function() {
canvas.getContext("2d").drawImage(img, 0, 0, canvas.width, canvas.height);
const quality = Math.min(rn(1 - pngResolutionInput.value / 20, 2), .92);
const URL = await canvas.toDataURL("image/jpeg", quality);
const link = document.createElement("a");
link.download = getFileName() + ".jpeg";
link.href = URL;
link.click();
tip(`${link.download} is saved. Open "Downloads" screen (CTRL + J) to check`, true, "success", 7000);
window.setTimeout(() => window.URL.revokeObjectURL(URL), 5000);
}
TIME && console.timeEnd("saveJPEG");
}
// parse map svg to object url
async function getMapURL(type, subtype) {
const cloneEl = document.getElementById("map").cloneNode(true); // clone svg
cloneEl.id = "fantasyMap";
document.body.appendChild(cloneEl);
const clone = d3.select(cloneEl);
clone.select("#debug").remove();
const cloneDefs = cloneEl.getElementsByTagName("defs")[0];
const svgDefs = document.getElementById("defElements");
const isFirefox = navigator.userAgent.toLowerCase().indexOf('firefox') > -1;
if (isFirefox && type === "mesh") clone.select("#oceanPattern").remove();
if (subtype === "globe") clone.select("#scaleBar").remove();
if (subtype === "noWater") {
clone.select("#oceanBase").attr("opacity", 0);
clone.select("#oceanPattern").attr("opacity", 0);
}
if (type !== "png") {
// reset transform to show the whole map
clone.attr("width", graphWidth).attr("height", graphHeight);
clone.select("#viewbox").attr("transform", null);
}
if (type === "svg") removeUnusedElements(clone);
if (customization && type === "mesh") updateMeshCells(clone);
inlineStyle(clone);
// remove unused filters
const filters = cloneEl.querySelectorAll("filter");
for (let i=0; i < filters.length; i++) {
const id = filters[i].id;
if (cloneEl.querySelector("[filter='url(#"+id+")']")) continue;
if (cloneEl.getAttribute("filter") === "url(#"+id+")") continue;
filters[i].remove();
}
// remove unused patterns
const patterns = cloneEl.querySelectorAll("pattern");
for (let i=0; i < patterns.length; i++) {
const id = patterns[i].id;
if (cloneEl.querySelector("[fill='url(#"+id+")']")) continue;
patterns[i].remove();
}
// remove unused symbols
const symbols = cloneEl.querySelectorAll("symbol");
for (let i=0; i < symbols.length; i++) {
const id = symbols[i].id;
if (cloneEl.querySelector("use[*|href='#"+id+"']")) continue;
symbols[i].remove();
}
// add displayed emblems
if (layerIsOn("toggleEmblems") && emblems.selectAll("use").size()) {
cloneEl.getElementById("emblems")?.querySelectorAll("use").forEach(el => {
const href = el.getAttribute("href") || el.getAttribute("xlink:href");
if (!href) return;
const emblem = document.getElementById(href.slice(1));
if (emblem) cloneDefs.append(emblem.cloneNode(true));
});
} else {
cloneDefs.querySelector("#defs-emblems")?.remove();
}
// replace ocean pattern href to base64
if (PRODUCTION && cloneEl.getElementById("oceanicPattern")) {
const el = cloneEl.getElementById("oceanicPattern");
const url = el.getAttribute("href");
await new Promise(resolve => {
getBase64(url, base64 => {
el.setAttribute("href", base64);
resolve();
});
});
}
// add relief icons
if (cloneEl.getElementById("terrain")) {
const uniqueElements = new Set();
const terrainNodes = cloneEl.getElementById("terrain").childNodes;
for (let i=0; i < terrainNodes.length; i++) {
const href = terrainNodes[i].getAttribute("href") || terrainNodes[i].getAttribute("xlink:href");
uniqueElements.add(href);
}
const defsRelief = svgDefs.getElementById("defs-relief");
for (const terrain of [...uniqueElements]) {
const element = defsRelief.querySelector(terrain);
if (element) cloneDefs.appendChild(element.cloneNode(true));
}
}
// add wind rose
if (cloneEl.getElementById("compass")) {
const rose = svgDefs.getElementById("rose");
if (rose) cloneDefs.appendChild(rose.cloneNode(true));
}
// add port icon
if (cloneEl.getElementById("anchors")) {
const anchor = svgDefs.getElementById("icon-anchor");
if (anchor) cloneDefs.appendChild(anchor.cloneNode(true));
}
// add grid pattern
if (cloneEl.getElementById("gridOverlay")?.hasChildNodes()) {
const type = cloneEl.getElementById("gridOverlay").getAttribute("type");
const pattern = svgDefs.getElementById("pattern_"+type);
if (pattern) cloneDefs.appendChild(pattern.cloneNode(true));
}
if (!cloneEl.getElementById("hatching").children.length) cloneEl.getElementById("hatching").remove(); //remove unused hatching group
if (!cloneEl.getElementById("fogging-cont")) cloneEl.getElementById("fog").remove(); //remove unused fog
if (!cloneEl.getElementById("regions")) cloneEl.getElementById("statePaths").remove(); // removed unused statePaths
if (!cloneEl.getElementById("labels")) cloneEl.getElementById("textPaths").remove(); // removed unused textPaths
// add armies style
if (cloneEl.getElementById("armies")) cloneEl.insertAdjacentHTML("afterbegin", "<style>#armies text {stroke: none; fill: #fff; text-shadow: 0 0 4px #000; dominant-baseline: central; text-anchor: middle; font-family: Helvetica; fill-opacity: 1;}#armies text.regimentIcon {font-size: .8em;}</style>");
const fontStyle = await GFontToDataURI(getFontsToLoad(clone)); // load non-standard fonts
if (fontStyle) clone.select("defs").append("style").text(fontStyle.join('\n')); // add font to style
clone.remove();
const serialized = `<?xml version="1.0" encoding="UTF-8" standalone="no"?>` + (new XMLSerializer()).serializeToString(cloneEl);
const blob = new Blob([serialized], {type: 'image/svg+xml;charset=utf-8'});
const url = window.URL.createObjectURL(blob);
window.setTimeout(() => window.URL.revokeObjectURL(url), 5000);
return url;
}
// remove hidden g elements and g elements without children to make downloaded svg smaller in size
function removeUnusedElements(clone) {
if (!terrain.selectAll("use").size()) clone.select("#defs-relief").remove();
if (markers.style("display") === "none") clone.select("#defs-markers").remove();
for (let empty = 1; empty;) {
empty = 0;
clone.selectAll("g").each(function() {
if (!this.hasChildNodes() || this.style.display === "none" || this.classList.contains("hidden")) {empty++; this.remove();}
if (this.hasAttribute("display") && this.style.display === "inline") this.removeAttribute("display");
});
}
}
function updateMeshCells(clone) {
const data = renderOcean.checked ? grid.cells.i : grid.cells.i.filter(i => grid.cells.h[i] >= 20);
const scheme = getColorScheme();
clone.select("#heights").attr("filter", "url(#blur1)");
clone.select("#heights").selectAll("polygon").data(data).join("polygon").attr("points", d => getGridPolygon(d))
.attr("id", d => "cell"+d).attr("stroke", d => getColor(grid.cells.h[d], scheme));
}
// for each g element get inline style
function | (clone) {
const emptyG = clone.append("g").node();
const defaultStyles = window.getComputedStyle(emptyG);
clone.selectAll("g, #ruler *, #scaleBar > text").each(function() {
const compStyle = window.getComputedStyle(this);
let style = "";
for (let i=0; i < compStyle.length; i++) {
const key = compStyle[i];
const value = compStyle.getPropertyValue(key);
// Firefox mask hack
if (key === "mask-image" && value !== defaultStyles.getPropertyValue(key)) {
style += "mask-image: url('#land');";
continue;
}
if (key === "cursor") continue; // cursor should be default
if (this.hasAttribute(key)) continue; // don't add style if there is the same attribute
if (value === defaultStyles.getPropertyValue(key)) continue;
style += key + ':' + value + ';';
}
for (const key in compStyle) {
const value = compStyle.getPropertyValue(key);
if (key === "cursor") continue; // cursor should be default
if (this.hasAttribute(key)) continue; // don't add style if there is the same attribute
if (value === defaultStyles.getPropertyValue(key)) continue;
style += key + ':' + value + ';';
}
if (style != "") this.setAttribute('style', style);
});
emptyG.remove();
}
// get non-standard fonts used for labels to fetch them from web
function getFontsToLoad(clone) {
const webSafe = ["Georgia", "Times+New+Roman", "Comic+Sans+MS", "Lucida+Sans+Unicode", "Courier+New", "Verdana", "Arial", "Impact"]; // fonts to not fetch
const fontsInUse = new Set(); // to store fonts currently in use
clone.selectAll("#labels > g").each(function() {
if (!this.hasChildNodes()) return;
const font = this.dataset.font;
if (!font || webSafe.includes(font)) return;
fontsInUse.add(font);
});
const legendFont = legend.attr("data-font");
if (legend.node().hasChildNodes() && !webSafe.includes(legendFont)) fontsInUse.add(legendFont);
const fonts = [...fontsInUse];
return fonts.length ? "https://fonts.googleapis.com/css?family=" + fonts.join("|") : null;
}
// code from Kaiido's answer https://stackoverflow.com/questions/42402584/how-to-use-google-fonts-in-canvas-when-drawing-dom-objects-in-svg
function GFontToDataURI(url) {
if (!url) return Promise.resolve();
return fetch(url) // first fecth the embed stylesheet page
.then(resp => resp.text()) // we only need the text of it
.then(text => {
let s = document.createElement('style');
s.innerHTML = text;
document.head.appendChild(s);
const styleSheet = Array.prototype.filter.call(document.styleSheets, sS => sS.ownerNode === s)[0];
const FontRule = rule => {
const src = rule.style.getPropertyValue('src');
const url = src ? src.split('url(')[1].split(')')[0] : "";
return {rule, src, url: url.substring(url.length - 1, 1)};
}
const fontProms = [];
for (const r of styleSheet.cssRules) {
let fR = FontRule(r);
if (!fR.url) continue;
fontProms.push(
fetch(fR.url) // fetch the actual font-file (.woff)
.then(resp => resp.blob())
.then(blob => {
return new Promise(resolve => {
let f = new FileReader();
f.onload = e => resolve(f.result);
f.readAsDataURL(blob);
})
})
.then(dataURL => fR.rule.cssText.replace(fR.url, dataURL))
)
}
document.head.removeChild(s); // clean up
return Promise.all(fontProms); // wait for all this has been done
});
}
// prepare map data for saving
function getMapData() {
TIME && console.time("createMapDataBlob");
return new Promise(resolve => {
const date = new Date();
const dateString = date.getFullYear() + "-" + (date.getMonth() + 1) + "-" + date.getDate();
const license = "File can be loaded in azgaar.github.io/Fantasy-Map-Generator";
const params = [version, license, dateString, seed, graphWidth, graphHeight, mapId].join("|");
const settings = [distanceUnitInput.value, distanceScaleInput.value, areaUnit.value,
heightUnit.value, heightExponentInput.value, temperatureScale.value,
barSize.value, barLabel.value, barBackOpacity.value, barBackColor.value,
barPosX.value, barPosY.value, populationRate.value, urbanization.value,
mapSizeOutput.value, latitudeOutput.value, temperatureEquatorOutput.value,
temperaturePoleOutput.value, precOutput.value, JSON.stringify(options),
mapName.value].join("|");
const coords = JSON.stringify(mapCoordinates);
const biomes = [biomesData.color, biomesData.habitability, biomesData.name].join("|");
const notesData = JSON.stringify(notes);
const rulersString = rulers.toString();
// clone svg
const cloneEl = document.getElementById("map").cloneNode(true);
// set transform values to default
cloneEl.setAttribute("width", graphWidth);
cloneEl.setAttribute("height", graphHeight);
cloneEl.querySelector("#viewbox").removeAttribute("transform");
// always remove rulers
cloneEl.querySelector("#ruler").innerHTML = "";
const svg_xml = (new XMLSerializer()).serializeToString(cloneEl);
const gridGeneral = JSON.stringify({spacing:grid.spacing, cellsX:grid.cellsX, cellsY:grid.cellsY, boundary:grid.boundary, points:grid.points, features:grid.features});
const features = JSON.stringify(pack.features);
const cultures = JSON.stringify(pack.cultures);
const states = JSON.stringify(pack.states);
const burgs = JSON.stringify(pack.burgs);
const religions = JSON.stringify(pack.religions);
const provinces = JSON.stringify(pack.provinces);
const rivers = JSON.stringify(pack.rivers);
// store name array only if it is not the same as default
const defaultNB = Names.getNameBases();
const namesData = nameBases.map((b,i) => {
const names = defaultNB[i] && defaultNB[i].b === b.b ? "" : b.b;
return `${b.name}|${b.min}|${b.max}|${b.d}|${b.m}|${names}`;
}).join("/");
// round population to save resources
const pop = Array.from(pack.cells.pop).map(p => rn(p, 4));
// data format as below
const data = [params, settings, coords, biomes, notesData, svg_xml,
gridGeneral, grid.cells.h, grid.cells.prec, grid.cells.f, grid.cells.t, grid.cells.temp,
features, cultures, states, burgs,
pack.cells.biome, pack.cells.burg, pack.cells.conf, pack.cells.culture, pack.cells.fl,
pop, pack.cells.r, pack.cells.road, pack.cells.s, pack.cells.state,
pack.cells.religion, pack.cells.province, pack.cells.crossroad, religions, provinces,
namesData, rivers, rulersString].join("\r\n");
const blob = new Blob([data], {type: "text/plain"});
TIME && console.timeEnd("createMapDataBlob");
resolve(blob);
});
}
// Download .map file
async function saveMap() {
if (customization) {tip("Map cannot be saved when edit mode is active, please exit the mode and retry", false, "error"); return;}
closeDialogs("#alert");
const blob = await getMapData();
const URL = window.URL.createObjectURL(blob);
const link = document.createElement("a");
link.download = getFileName() + ".map";
link.href = URL;
link.click();
tip(`${link.download} is saved. Open "Downloads" screen (CTRL + J) to check`, true, "success", 7000);
window.URL.revokeObjectURL(URL);
}
function saveGeoJSON_Cells() {
const json = {type: "FeatureCollection", features: []};
const cells = pack.cells;
const getPopulation = i => {const [r, u] = getCellPopulation(i); return rn(r+u)};
const getHeight = i => parseInt(getFriendlyHeight([cells.p[i][0],cells.p[i][1]]));
cells.i.forEach(i => {
const coordinates = getCellCoordinates(cells.v[i]);
const height = getHeight(i);
const biome = cells.biome[i];
const type = pack.features[cells.f[i]].type;
const population = getPopulation(i);
const state = cells.state[i];
const province = cells.province[i];
const culture = cells.culture[i];
const religion = cells.religion[i];
const neighbors = cells.c[i];
const properties = {id:i, height, biome, type, population, state, province, culture, religion, neighbors}
const feature = {type: "Feature", geometry: {type: "Polygon", coordinates}, properties};
json.features.push(feature);
});
const name = getFileName("Cells") + ".geojson";
downloadFile(JSON.stringify(json), name, "application/json");
}
function saveGeoJSON_Routes() {
const json = {type: "FeatureCollection", features: []};
routes.selectAll("g > path").each(function() {
const coordinates = getRoutePoints(this);
const id = this.id;
const type = this.parentElement.id;
const feature = {type: "Feature", geometry: {type: "LineString", coordinates}, properties: {id, type}};
json.features.push(feature);
});
const name = getFileName("Routes") + ".geojson";
downloadFile(JSON.stringify(json), name, "application/json");
}
function saveGeoJSON_Rivers() {
const json = {type: "FeatureCollection", features: []};
rivers.selectAll("path").each(function() {
const coordinates = getRiverPoints(this);
const id = this.id;
const width = +this.dataset.increment;
const increment = +this.dataset.increment;
const river = pack.rivers.find(r => r.i === +id.slice(5));
const name = river ? river.name : "";
const type = river ? river.type : "";
const i = river ? river.i : "";
const basin = river ? river.basin : "";
const feature = {type: "Feature", geometry: {type: "LineString", coordinates}, properties: {id, i, basin, name, type, width, increment}};
json.features.push(feature);
});
const name = getFileName("Rivers") + ".geojson";
downloadFile(JSON.stringify(json), name, "application/json");
}
function saveGeoJSON_Markers() {
const json = {type: "FeatureCollection", features: []};
markers.selectAll("use").each(function() {
const coordinates = getQGIScoordinates(this.dataset.x, this.dataset.y);
const id = this.id;
const type = (this.dataset.id).substring(1);
const icon = document.getElementById(type).textContent;
const note = notes.length ? notes.find(note => note.id === this.id) : null;
const name = note ? note.name : "";
const legend = note ? note.legend : "";
const feature = {type: "Feature", geometry: {type: "Point", coordinates}, properties: {id, type, icon, name, legend}};
json.features.push(feature);
});
const name = getFileName("Markers") + ".geojson";
downloadFile(JSON.stringify(json), name, "application/json");
}
function getCellCoordinates(vertices) {
const p = pack.vertices.p;
const coordinates = vertices.map(n => getQGIScoordinates(p[n][0], p[n][1]));
return [coordinates.concat([coordinates[0]])];
}
function getRoutePoints(node) {
let points = [];
const l = node.getTotalLength();
const increment = l / Math.ceil(l / 2);
for (let i=0; i <= l; i += increment) {
const p = node.getPointAtLength(i);
points.push(getQGIScoordinates(p.x, p.y));
}
return points;
}
function getRiverPoints(node) {
let points = [];
const l = node.getTotalLength() / 2; // half-length
const increment = 0.25; // defines density of points
for (let i=l, c=i; i >= 0; i -= increment, c += increment) {
const p1 = node.getPointAtLength(i);
const p2 = node.getPointAtLength(c);
const [x, y] = getQGIScoordinates((p1.x + p2.x) / 2, (p1.y + p2.y) / 2);
points.push([x,y]);
}
return points;
}
async function quickSave() {
if (customization) {tip("Map cannot be saved when edit mode is active, please exit the mode and retry", false, "error"); return;}
const blob = await getMapData();
if (blob) ldb.set("lastMap", blob); // auto-save map
tip("Map is saved to browser memory. Please also save as .map file to secure progress", true, "success", 2000);
}
function quickLoad() {
ldb.get("lastMap", blob => {
if (blob) {
loadMapPrompt(blob);
} else {
tip("No map stored. Save map to storage first", true, "error", 2000);
ERROR && console.error("No map stored");
}
});
}
function loadMapPrompt(blob) {
const workingTime = (Date.now() - last(mapHistory).created) / 60000; // minutes
if (workingTime < 5) {loadLastSavedMap(); return;}
alertMessage.innerHTML = `Are you sure you want to load saved map?<br>
All unsaved changes made to the current map will be lost`;
$("#alert").dialog({resizable: false, title: "Load saved map",
buttons: {
Cancel: function() {$(this).dialog("close");},
Load: function() {loadLastSavedMap(); $(this).dialog("close");}
}
});
function loadLastSavedMap() {
WARN && console.warn("Load last saved map");
try {
uploadMap(blob);
}
catch(error) {
ERROR && console.error(error);
tip("Cannot load last saved map", true, "error", 2000);
}
}
}
const saveReminder = function() {
if (localStorage.getItem("noReminder")) return;
const message = ["Please don't forget to save your work as a .map file",
"Please remember to save work as a .map file",
"Saving in .map format will ensure your data won't be lost in case of issues",
"Safety is number one priority. Please save the map",
"Don't forget to save your map on a regular basis!",
"Just a gentle reminder for you to save the map",
"Please don't forget to save your progress (saving as .map is the best option)",
"Don't want to be reminded about need to save? Press CTRL+Q"];
saveReminder.reminder = setInterval(() => {
if (customization) return;
tip(ra(message), true, "warn", 2500);
}, 1e6);
saveReminder.status = 1;
}
saveReminder();
function toggleSaveReminder() {
if (saveReminder.status) {
tip("Save reminder is turned off. Press CTRL+Q again to re-initiate", true, "warn", 2000);
clearInterval(saveReminder.reminder);
localStorage.setItem("noReminder", true);
saveReminder.status = 0;
} else {
tip("Save reminder is turned on. Press CTRL+Q to turn off", true, "warn", 2000);
localStorage.removeItem("noReminder");
saveReminder();
}
}
function uploadMap(file, callback) {
uploadMap.timeStart = performance.now();
const fileReader = new FileReader();
fileReader.onload = function(fileLoadedEvent) {
if (callback) callback();
document.getElementById("coas").innerHTML = ""; // remove auto-generated emblems
const dataLoaded = fileLoadedEvent.target.result;
const data = dataLoaded.split("\r\n");
const mapVersion = data[0].split("|")[0] || data[0];
if (mapVersion === version) {parseLoadedData(data); return;}
const archive = link("https://github.com/Azgaar/Fantasy-Map-Generator/wiki/Changelog", "archived version");
const parsed = parseFloat(mapVersion);
let message = "", load = false;
if (isNaN(parsed) || data.length < 26 || !data[5]) {
message = `The file you are trying to load is outdated or not a valid .map file.
<br>Please try to open it using an ${archive}`;
} else if (parsed < 0.7) {
message = `The map version you are trying to load (${mapVersion}) is too old and cannot be updated to the current version.
<br>Please keep using an ${archive}`;
} else {
load = true;
message = `The map version (${mapVersion}) does not match the Generator version (${version}).
<br>Click OK to get map <b>auto-updated</b>. In case of issues please keep using an ${archive} of the Generator`;
}
alertMessage.innerHTML = message;
$("#alert").dialog({title: "Version conflict", width: "38em", buttons: {
OK: function() {$(this).dialog("close"); if (load) parseLoadedData(data);}
}});
};
fileReader.readAsText(file, "UTF-8");
}
function parseLoadedData(data) {
try {
// exit customization
if (window.closeDialogs) closeDialogs();
customization = 0;
if (customizationMenu.offsetParent) styleTab.click();
const reliefIcons = document.getElementById("defs-relief").innerHTML; // save relief icons
const hatching = document.getElementById("hatching").cloneNode(true); // save hatching
void function parseParameters() {
const params = data[0].split("|");
if (params[3]) {seed = params[3]; optionsSeed.value = seed;}
if (params[4]) graphWidth = +params[4];
if (params[5]) graphHeight = +params[5];
mapId = params[6] ? +params[6] : Date.now();
}()
INFO && console.group("Loaded Map " + seed);
void function parseSettings() {
const settings = data[1].split("|");
if (settings[0]) applyOption(distanceUnitInput, settings[0]);
if (settings[1]) distanceScaleInput.value = distanceScaleOutput.value = settings[1];
if (settings[2]) areaUnit.value = settings[2];
if (settings[3]) applyOption(heightUnit, settings[3]);
if (settings[4]) heightExponentInput.value = heightExponentOutput.value = settings[4];
if (settings[5]) temperatureScale.value = settings[5];
if (settings[6]) barSize.value = barSizeOutput.value = settings[6];
if (settings[7] !== undefined) barLabel.value = settings[7];
if (settings[8] !== undefined) barBackOpacity.value = settings[8];
if (settings[9]) barBackColor.value = settings[9];
if (settings[10]) barPosX.value = settings[10];
if (settings[11]) barPosY.value = settings[11];
if (settings[12]) populationRate.value = populationRateOutput.value = settings[12];
if (settings[13]) urbanization.value = urbanizationOutput.value = settings[13];
if (settings[14]) mapSizeInput.value = mapSizeOutput.value = Math.max(Math.min(settings[14], 100), 1);
if (settings[15]) latitudeInput.value = latitudeOutput.value = Math.max(Math.min(settings[15], 100), 0);
if (settings[16]) temperatureEquatorInput.value = temperatureEquatorOutput.value = settings[16];
if (settings[17]) temperaturePoleInput.value = temperaturePoleOutput.value = settings[17];
if (settings[18]) precInput.value = precOutput.value = settings[18];
if (settings[19]) options = JSON.parse(settings[19]);
if (settings[20]) mapName.value = settings[20];
}()
void function parseConfiguration() {
if (data[2]) mapCoordinates = JSON.parse(data[2]);
if (data[4]) notes = JSON.parse(data[4]);
if (data[33]) rulers.fromString(data[33]);
const biomes = data[3].split("|");
biomesData = applyDefaultBiomesSystem();
biomesData.color = biomes[0].split(",");
biomesData.habitability = biomes[1].split(",").map(h => +h);
biomesData.name = biomes[2].split(",");
// push custom biomes if any
for (let i=biomesData.i.length; i < biomesData.name.length; i++) {
biomesData.i.push(biomesData.i.length);
biomesData.iconsDensity.push(0);
biomesData.icons.push([]);
biomesData.cost.push(50);
}
}()
void function replaceSVG() {
svg.remove();
document.body.insertAdjacentHTML("afterbegin", data[5]);
}()
void function redefineElements() {
svg = d3.select("#map");
defs = svg.select("#deftemp");
viewbox = svg.select("#viewbox");
scaleBar = svg.select("#scaleBar");
legend = svg.select("#legend");
ocean = viewbox.select("#ocean");
oceanLayers = ocean.select("#oceanLayers");
oceanPattern = ocean.select("#oceanPattern");
lakes = viewbox.select("#lakes");
landmass = viewbox.select("#landmass");
texture = viewbox.select("#texture");
terrs = viewbox.select("#terrs");
biomes = viewbox.select("#biomes");
ice = viewbox.select("#ice");
cells = viewbox.select("#cells");
gridOverlay = viewbox.select("#gridOverlay");
coordinates = viewbox.select("#coordinates");
compass = viewbox.select("#compass");
rivers = viewbox.select("#rivers");
terrain = viewbox.select("#terrain");
relig = viewbox.select("#relig");
cults = viewbox.select("#cults");
regions = viewbox.select("#regions");
statesBody = regions.select("#statesBody");
statesHalo = regions.select("#statesHalo");
provs = viewbox.select("#provs");
zones = viewbox.select("#zones");
borders = viewbox.select("#borders");
stateBorders = borders.select("#stateBorders");
provinceBorders = borders.select("#provinceBorders");
routes = viewbox.select("#routes");
roads = routes.select("#roads");
trails = routes.select("#trails");
searoutes = routes.select("#searoutes");
temperature = viewbox.select("#temperature");
coastline = viewbox.select("#coastline");
prec = viewbox.select("#prec");
population = viewbox.select("#population");
emblems = viewbox.select("#emblems");
labels = viewbox.select("#labels");
icons = viewbox.select("#icons");
burgIcons = icons.select("#burgIcons");
anchors = icons.select("#anchors");
armies = viewbox.select("#armies");
markers = viewbox.select("#markers");
ruler = viewbox.select("#ruler");
fogging = viewbox.select("#fogging");
debug = viewbox.select("#debug");
burgLabels = labels.select("#burgLabels");
}()
void function parseGridData() {
grid = JSON.parse(data[6]);
calculateVoronoi(grid, grid.points);
grid.cells.h = Uint8Array.from(data[7].split(","));
grid.cells.prec = Uint8Array.from(data[8].split(","));
grid.cells.f = Uint16Array.from(data[9].split(","));
grid.cells.t = Int8Array.from(data[10].split(","));
grid.cells.temp = Int8Array.from(data[11].split(","));
}()
void function parsePackData() {
pack = {};
reGraph();
reMarkFeatures();
pack.features = JSON.parse(data[12]);
pack.cultures = JSON.parse(data[13]);
pack.states = JSON.parse(data[14]);
pack.burgs = JSON.parse(data[15]);
pack.religions = data[29] ? JSON.parse(data[29]) : [{i: 0, name: "No religion"}];
pack.provinces = data[30] ? JSON.parse(data[30]) : [0];
pack.rivers = data[32] ? JSON.parse(data[32]) : [];
const cells = pack.cells;
cells.biome = Uint8Array.from(data[16].split(","));
cells.burg = Uint16Array.from(data[17].split(","));
cells.conf = Uint8Array.from(data[18].split(","));
cells.culture = Uint16Array.from(data[19].split(","));
cells.fl = Uint16Array.from(data[20].split(","));
cells.pop = Float32Array.from(data[21].split(","));
cells.r = Uint16Array.from(data[22].split(","));
cells.road = Uint16Array.from(data[23].split(","));
cells.s = Uint16Array.from(data[24].split(","));
cells.state = Uint16Array.from(data[25].split(","));
cells.religion = data[26] ? Uint16Array.from(data[26].split(",")) : new Uint16Array(cells.i.length);
cells.province = data[27] ? Uint16Array.from(data[27].split(",")) : new Uint16Array(cells.i.length);
cells.crossroad = data[28] ? Uint16Array.from(data[28].split(",")) : new Uint16Array(cells.i.length);
if (data[31]) {
const namesDL = data[31].split("/");
namesDL.forEach((d, i) => {
const e = d.split("|");
if (!e.length) return;
const b = e[5].split(",").length > 2 || !nameBases[i] ? e[5] : nameBases[i].b;
nameBases[i] = {name:e[0], min:e[1], max:e[2], d:e[3], m:e[4], b};
});
}
}()
const notHidden = selection => selection.node() && selection.style("display") !== "none";
const hasChildren = selection => selection.node()?.hasChildNodes();
const hasChild = (selection, selector) => selection.node()?.querySelector(selector);
const turnOn = el => document.getElementById(el).classList.remove("buttonoff");
void function restoreLayersState() {
// turn all layers off
document.getElementById("mapLayers").querySelectorAll("li").forEach(el => el.classList.add("buttonoff"));
// turn on active layers
if (notHidden(texture) && hasChild(texture, "image")) turnOn("toggleTexture");
if (hasChildren(terrs)) turnOn("toggleHeight");
if (hasChildren(biomes)) turnOn("toggleBiomes");
if (hasChildren(cells)) turnOn("toggleCells");
if (hasChildren(gridOverlay)) turnOn("toggleGrid");
if (hasChildren(coordinates)) turnOn("toggleCoordinates");
if (notHidden(compass) && hasChild(compass, "use")) turnOn("toggleCompass");
if (notHidden(rivers)) turnOn("toggleRivers");
if (notHidden(terrain) && hasChildren(terrain)) turnOn("toggleRelief");
if (hasChildren(relig)) turnOn("toggleReligions");
if (hasChildren(cults)) turnOn("toggleCultures");
if (hasChildren(statesBody)) turnOn("toggleStates");
if (hasChildren(provs)) turnOn("toggleProvinces");
if (hasChildren(zones) && notHidden(zones)) turnOn("toggleZones");
if (notHidden(borders) && hasChild(compass, "use")) turnOn("toggleBorders");
if (notHidden(routes) && hasChild(routes, "path")) turnOn("toggleRoutes");
if (hasChildren(temperature)) turnOn("toggleTemp");
if (hasChild(population, "line")) turnOn("togglePopulation");
if (hasChildren(ice)) turnOn("toggleIce");
if (hasChild(prec, "circle")) turnOn("togglePrec");
if (notHidden(emblems) && hasChild(emblems, "use")) turnOn("toggleEmblems");
if (notHidden(labels)) turnOn("toggleLabels");
if (notHidden(icons)) turnOn("toggleIcons");
if (hasChildren(armies) && notHidden(armies)) turnOn("toggleMilitary");
if (hasChildren(markers) && notHidden(markers)) turnOn("toggleMarkers");
if (notHidden(ruler)) turnOn("toggleRulers");
if (notHidden(scaleBar)) turnOn("toggleScaleBar");
getCurrentPreset();
}()
void function restoreEvents() {
scaleBar.on("mousemove", () => tip("Click to open Units Editor")).on("click", () => editUnits());
legend.on("mousemove", () => tip("Drag to change the position. Click to hide the legend")).on("click", () => clearLegend());
}()
void function resolveVersionConflicts() {
const version = parseFloat(data[0].split("|")[0]);
if (version < 0.9) {
// 0.9 has additional relief icons to be included into older maps
document.getElementById("defs-relief").innerHTML = reliefIcons;
}
if (version < 1) {
// 1.0 adds a new religions layer
relig = viewbox.insert("g", "#terrain").attr("id", "relig");
Religions.generate();
// 1.0 adds a legend box
legend = svg.append("g").attr("id", "legend");
legend.attr("font-family", "Almendra SC").attr("data-font", "Almendra+SC")
.attr("font-size", 13).attr("data-size", 13).attr("data-x", 99).attr("data-y", 93)
.attr("stroke-width", 2.5).attr("stroke", "#812929").attr("stroke-dasharray", "0 4 10 4").attr("stroke-linecap", "round");
// 1.0 separated drawBorders fron drawStates()
stateBorders = borders.append("g").attr("id", "stateBorders");
provinceBorders = borders.append("g").attr("id", "provinceBorders");
borders.attr("opacity", null).attr("stroke", null).attr("stroke-width", null).attr("stroke-dasharray", null).attr("stroke-linecap", null).attr("filter", null);
stateBorders.attr("opacity", .8).attr("stroke", "#56566d").attr("stroke-width", 1).attr("stroke-dasharray", "2").attr("stroke-linecap", "butt");
provinceBorders.attr("opacity", .8).attr("stroke", "#56566d").attr("stroke-width", .5).attr("stroke-dasharray", "1").attr("stroke-linecap", "butt");
// 1.0 adds state relations, provinces, forms and full names
provs = viewbox.insert("g", "#borders").attr("id", "provs").attr("opacity", .6);
BurgsAndStates.collectStatistics();
BurgsAndStates.generateCampaigns();
BurgsAndStates.generateDiplomacy();
BurgsAndStates.defineStateForms();
drawStates();
BurgsAndStates.generateProvinces();
drawBorders();
if (!layerIsOn("toggleBorders")) $('#borders').fadeOut();
if (!layerIsOn("toggleStates")) regions.attr("display", "none").selectAll("path").remove();
// 1.0 adds hatching
document.getElementsByTagName("defs")[0].appendChild(hatching);
// 1.0 adds zones layer
zones = viewbox.insert("g", "#borders").attr("id", "zones").attr("display", "none");
zones.attr("opacity", .6).attr("stroke", null).attr("stroke-width", 0).attr("stroke-dasharray", null).attr("stroke-linecap", "butt");
addZones();
if (!markers.selectAll("*").size()) {addMarkers(); turnButtonOn("toggleMarkers");}
// 1.0 add fogging layer (state focus)
fogging = viewbox.insert("g", "#ruler").attr("id", "fogging-cont").attr("mask", "url(#fog)").append("g").attr("id", "fogging").style("display", "none");
fogging.append("rect").attr("x", 0).attr("y", 0).attr("width", "100%").attr("height", "100%");
defs.append("mask").attr("id", "fog").append("rect").attr("x", 0).attr("y", 0).attr("width", "100%").attr("height", "100%").attr("fill", "white");
// 1.0 changes states opacity bask to regions level
if (statesBody.attr("opacity")) {
regions.attr("opacity", statesBody.attr("opacity"));
statesBody.attr("opacity", null);
}
// 1.0 changed labels to multi-lined
labels.selectAll("textPath").each(function() {
const text = this.textContent;
const shift = this.getComputedTextLength() / -1.5;
this.innerHTML = `<tspan x="${shift}">${text}</tspan>`;
});
// 1.0 added new biome - Wetland
biomesData.name.push("Wetland");
biomesData.color.push("#0b9131");
biomesData.habitability.push(12);
}
if (version < 1.1) {
// v 1.0 initial code had a bug with religion layer id
if (!relig.size()) relig = viewbox.insert("g", "#terrain").attr("id", "relig");
// v 1.0 initially has Sympathy status then relaced with Friendly
for (const s of pack.states) {
if (!s.diplomacy) continue;
s.diplomacy = s.diplomacy.map(r => r === "Sympathy" ? "Friendly" : r);
}
// labels should be toggled via style attribute, so remove display attribute
labels.attr("display", null);
// v 1.0 added religions heirarchy tree
if (pack.religions[1] && !pack.religions[1].code) {
pack.religions.filter(r => r.i).forEach(r => {
r.origin = 0;
r.code = r.name.slice(0, 2);
});
}
if (!document.getElementById("freshwater")) {
lakes.append("g").attr("id", "freshwater");
lakes.select("#freshwater").attr("opacity", .5).attr("fill", "#a6c1fd").attr("stroke", "#5f799d").attr("stroke-width", .7).attr("filter", null);
}
if (!document.getElementById("salt")) {
lakes.append("g").attr("id", "salt");
lakes.select("#salt").attr("opacity", .5).attr("fill", "#409b8a").attr("stroke", "#388985").attr("stroke-width", .7).attr("filter", null);
}
// v 1.1 added new lake and coast groups
if (!document.getElementById("sinkhole")) {
lakes.append("g").attr("id", "sinkhole");
lakes.append("g").attr("id", "frozen");
lakes.append("g").attr("id", "lava");
lakes.select("#sinkhole").attr("opacity", 1).attr("fill", "#5bc9fd").attr("stroke", "#53a3b0").attr("stroke-width", .7).attr("filter", null);
lakes.select("#frozen").attr("opacity", .95).attr("fill", "#cdd4e7").attr("stroke", "#cfe0eb").attr("stroke-width", 0).attr("filter", null);
lakes.select("#lava").attr("opacity", .7).attr("fill", "#90270d").attr("stroke", "#f93e0c").attr("stroke-width", 2).attr("filter", "url(#crumpled)");
coastline.append("g").attr("id", "sea_island");
coastline.append("g").attr("id", "lake_island");
coastline.select("#sea_island").attr("opacity", .5).attr("stroke", "#1f3846").attr("stroke-width", .7).attr("filter", "url(#dropShadow)");
coastline.select("#lake_island").attr("opacity", 1).attr("stroke", "#7c8eaf").attr("stroke-width", .35).attr("filter", null);
}
// v 1.1 features stores more data
defs.select("#land").selectAll("path").remove();
defs.select("#water").selectAll("path").remove();
coastline.selectAll("path").remove();
lakes.selectAll("path").remove();
drawCoastline();
}
if (version < 1.11) {
// v 1.11 added new attributes
terrs.attr("scheme", "bright").attr("terracing", 0).attr("skip", 5).attr("relax", 0).attr("curve", 0);
svg.select("#oceanic > *").attr("id", "oceanicPattern");
oceanLayers.attr("layers", "-6,-3,-1");
gridOverlay.attr("type", "pointyHex").attr("size", 10);
// v 1.11 added cultures heirarchy tree
if (pack.cultures[1] && !pack.cultures[1].code) {
pack.cultures.filter(c => c.i).forEach(c => {
c.origin = 0;
c.code = c.name.slice(0, 2);
});
}
// v 1.11 had an issue with fogging being displayed on load
unfog();
// v 1.2 added new terrain attributes
if (!terrain.attr("set")) terrain.attr("set", "simple");
if (!terrain.attr("size")) terrain.attr("size", 1);
if (!terrain.attr("density")) terrain.attr("density", .4);
}
if (version < 1.21) {
// v 1.11 replaced "display" attribute by "display" style
viewbox.selectAll("g").each(function() {
if (this.hasAttribute("display")) {
this.removeAttribute("display");
this.style.display = "none";
}
});
// v 1.21 added rivers data to pack
pack.rivers = []; // rivers data
rivers.selectAll("path").each(function() {
const i = +this.id.slice(5);
const length = this.getTotalLength() / 2;
const s = this.getPointAtLength(length), e = this.getPointAtLength(0);
const source = findCell(s.x, s.y), mouth = findCell(e.x, e.y);
const name = Rivers.getName(mouth);
const type = length < 25 ? rw({"Creek":9, "River":3, "Brook":3, "Stream":1}) : "River";
pack.rivers.push({i, parent:0, length, source, mouth, basin:i, name, type});
});
}
if (version < 1.22) {
// v 1.22 changed state neighbors from Set object to array
BurgsAndStates.collectStatistics();
}
if (version < 1.3) {
// v 1.3 added global options object
const winds = options.slice(); // previostly wind was saved in settings[19]
const year = rand(100, 2000);
const era = Names.getBaseShort(P(.7) ? 1 : rand(nameBases.length)) + " Era";
const eraShort = era[0] + "E";
const military = Military.getDefaultOptions();
options = {winds, year, era, eraShort, military};
// v 1.3 added campaings data for all states
BurgsAndStates.generateCampaigns();
// v 1.3 added militry layer
armies = viewbox.insert("g", "#icons").attr("id", "armies");
armies.attr("opacity", 1).attr("fill-opacity", 1).attr("font-size", 6).attr("box-size", 3).attr("stroke", "#000").attr("stroke-width", .3);
turnButtonOn("toggleMilitary");
Military.generate();
}
if (version < 1.4) {
// v 1.35 added dry lakes
if (!lakes.select("#dry").size()) {
lakes.append("g").attr("id", "dry");
lakes.select("#dry").attr("opacity", 1).attr("fill", "#c9bfa7").attr("stroke", "#8e816f").attr("stroke-width", .7).attr("filter", null);
}
// v 1.4 added ice layer
ice = viewbox.insert("g", "#coastline").attr("id", "ice").style("display", "none");
ice.attr("opacity", null).attr("fill", "#e8f0f6").attr("stroke", "#e8f0f6").attr("stroke-width", 1).attr("filter", "url(#dropShadow05)");
drawIce();
// v 1.4 added icon and power attributes for units
for (const unit of options.military) {
if (!unit.icon) unit.icon = getUnitIcon(unit.type);
if (!unit.power) unit.power = unit.crew;
}
function getUnitIcon(type) {
if (type === "naval") return "🌊";
if (type === "ranged") return "🏹";
if (type === "mounted") return "🐴";
if (type === "machinery") return "💣";
if (type === "armored") return "🐢";
if (type === "aviation") return "🦅";
if (type === "magical") return "🔮";
else return "⚔️";
}
// 1.4 added state reference for regiments
pack.states.filter(s => s.military).forEach(s => s.military.forEach(r => r.state = s.i));
}
if (version < 1.5) {
// not need to store default styles from v 1.5
localStorage.removeItem("styleClean");
localStorage.removeItem("styleGloom");
localStorage.removeItem("styleAncient");
localStorage.removeItem("styleMonochrome");
// v 1.5 cultures has shield attribute
pack.cultures.forEach(culture => {
if (culture.removed) return;
culture.shield = Cultures.getRandomShield();
});
// v 1.5 added burg type value
pack.burgs.forEach(burg => {
if (!burg.i || burg.removed) return;
burg.type = BurgsAndStates.getType(burg.cell, burg.port);
});
// v 1.5 added emblems
defs.append("g").attr("id", "defs-emblems");
emblems = viewbox.insert("g", "#population").attr("id", "emblems").style("display", "none");
emblems.append("g").attr("id", "burgEmblems");
emblems.append("g").attr("id", "provinceEmblems");
emblems.append("g").attr("id", "stateEmblems");
regenerateEmblems();
toggleEmblems();
// v 1.5 changed releif icons data
terrain.selectAll("use").each(function() {
const type = this.getAttribute("data-type") || this.getAttribute("xlink:href");
this.removeAttribute("xlink:href");
this.removeAttribute("data-type");
this.removeAttribute("data-size");
this.setAttribute("href", type);
});
}
if (version < 1.6) {
// v 1.6 changed rivers data
for (const river of pack.rivers) {
const el = document.getElementById("river"+river.i);
if (el) {
river.widthFactor = +el.getAttribute("data-width");
el.removeAttribute("data-width");
el.removeAttribute("data-increment");
river.discharge = pack.cells.fl[river.mouth] || 1;
river.width = rn(river.length / 100, 2);
river.sourceWidth = .1;
} else {
Rivers.remove(river.i);
}
}
// v 1.6 changed lakes data
for (const f of pack.features) {
if (f.type !== "lake") continue;
if (f.evaporation) continue;
f.flux = f.flux || f.cells * 3;
f.temp = grid.cells.temp[pack.cells.g[f.firstCell]];
f.height = f.height || d3.min(pack.cells.c[f.firstCell].map(c => pack.cells.h[c]).filter(h => h >= 20));
const height = (f.height - 18) ** heightExponentInput.value;
const evaporation = (700 * (f.temp + .006 * height) / 50 + 75) / (80 - f.temp);
f.evaporation = rn(evaporation * f.cells);
f.name = f.name || Lakes.getName(f);
delete f.river;
}
}
if (version < 1.61) {
// v 1.61 changed rulers data
ruler.style("display", null);
rulers = new Rulers();
ruler.selectAll(".ruler > .white").each(function() {
const x1 = +this.getAttribute("x1");
const y1 = +this.getAttribute("y1");
const x2 = +this.getAttribute("x2");
const y2 = +this.getAttribute("y2");
if (isNaN(x1) || isNaN(y1) || isNaN(x2) || isNaN(y2)) return;
const points = [[x1, y1], [x2, y2]];
rulers.create(Ruler, points);
});
ruler.selectAll("g.opisometer").each(function() {
const pointsString = this.dataset.points;
if (!pointsString) return;
const points = JSON.parse(pointsString);
rulers.create(Opisometer, points);
});
ruler.selectAll("path.planimeter").each(function() {
const length = this.getTotalLength();
if (length < 30) return;
const step = length > 1000 ? 40 : length > 400 ? 20 : 10;
const increment = length / Math.ceil(length / step);
const points = [];
for (let i=0; i <= length; i += increment) {
const point = this.getPointAtLength(i);
points.push([point.x | 0, point.y | 0]);
}
rulers.create(Planimeter, points);
});
ruler.selectAll("*").remove();
if (rulers.data.length) {
turnButtonOn("toggleRulers");
rulers.draw();
} else turnButtonOff("toggleRulers");
// 1.61 changed oceanicPattern from rect to image
const pattern = document.getElementById("oceanic");
const filter = pattern.firstElementChild.getAttribute("filter");
const href = filter ? "./images/" + filter.replace("url(#", "").replace(")", "") + ".png" : "";
pattern.innerHTML = `<image id="oceanicPattern" href=${href} width="100" height="100"></image>`;
document.getElementById("oceanPattern").setAttribute("opacity", .2);
}
}()
if (version < 1.62) {
// v 1.62 changed grid data
gridOverlay.attr("size", null);
}
void function checkDataIntegrity() {
const cells = pack.cells;
if (pack.cells.i.length !== pack.cells.state.length) {
ERROR && console.error("Striping issue. Map data is corrupted. The only solution is to edit the heightmap in erase mode");
}
const invalidStates = [...new Set(cells.state)].filter(s => !pack.states[s] || pack.states[s].removed);
invalidStates.forEach(s => {
const invalidCells = cells.i.filter(i => cells.state[i] === s);
invalidCells.forEach(i => cells.state[i] = 0);
ERROR && console.error("Data Integrity Check. Invalid state", s, "is assigned to cells", invalidCells);
});
const invalidProvinces = [...new Set(cells.province)].filter(p => p && (!pack.provinces[p] || pack.provinces[p].removed));
invalidProvinces.forEach(p => {
const invalidCells = cells.i.filter(i => cells.province[i] === p);
invalidCells.forEach(i => cells.province[i] = 0);
ERROR && console.error("Data Integrity Check. Invalid province", p, "is assigned to cells", invalidCells);
});
const invalidCultures = [...new Set(cells.culture)].filter(c => !pack.cultures[c] || pack.cultures[c].removed);
invalidCultures.forEach(c => {
const invalidCells = cells.i.filter(i => cells.culture[i] === c);
invalidCells.forEach(i => cells.province[i] = 0);
ERROR && console.error("Data Integrity Check. Invalid culture", c, "is assigned to cells", invalidCells);
});
const invalidReligions = [...new Set(cells.religion)].filter(r => !pack.religions[r] || pack.religions[r].removed);
invalidReligions.forEach(r => {
const invalidCells = cells.i.filter(i => cells.religion[i] === r);
invalidCells.forEach(i => cells.religion[i] = 0);
ERROR && console.error("Data Integrity Check. Invalid religion", c, "is assigned to cells", invalidCells);
});
const invalidFeatures = [...new Set(cells.f)].filter(f => f && !pack.features[f]);
invalidFeatures.forEach(f => {
const invalidCells = cells.i.filter(i => cells.f[i] === f);
// No fix as for now
ERROR && console.error("Data Integrity Check. Invalid feature", f, "is assigned to cells", invalidCells);
});
const invalidBurgs = [...new Set(cells.burg)].filter(b => b && (!pack.burgs[b] || pack.burgs[b].removed));
invalidBurgs.forEach(b => {
const invalidCells = cells.i.filter(i => cells.burg[i] === b);
invalidCells.forEach(i => cells.burg[i] = 0);
ERROR && console.error("Data Integrity Check. Invalid burg", b, "is assigned to cells", invalidCells);
});
const invalidRivers = [...new Set(cells.r)].filter(r => r && !pack.rivers.find(river => river.i === r));
invalidRivers.forEach(r => {
const invalidCells = cells.i.filter(i => cells.r[i] === r);
invalidCells.forEach(i => cells.r[i] = 0);
rivers.select("river"+r).remove();
ERROR && console.error("Data Integrity Check. Invalid river", r, "is assigned to cells", invalidCells);
});
pack.burgs.forEach(b => {
if (!b.i || b.removed) return;
if (b.port < 0) {ERROR && console.error("Data Integrity Check. Burg", b.i, "has invalid port value", b.port); b.port = 0;}
if (b.cell >= cells.i.length) {
ERROR && console.error("Data Integrity Check. Burg", b.i, "is linked to invalid cell", b.cell);
b.cell = findCell(b.x, b.y);
cells.i.filter(i => cells.burg[i] === b.i).forEach(i => cells.burg[i] = 0);
cells.burg[b.cell] = b.i;
}
if (b.state && !pack.states[b.state]) {
ERROR && console.error("Data Integrity Check. Burg", b.i, "is linked to invalid state", b.state);
b.state = 0;
}
});
pack.provinces.forEach(p => {
if (!p.i || p.removed) return;
if (pack.states[p.state] && !pack.states[p.state].removed) return;
ERROR && console.error("Data Integrity Check. Province", p.i, "is linked to removed state", p.state);
p.removed = true; // remove incorrect province
});
}()
changeMapSize();
// remove href from emblems, to trigger rendering on load
emblems.selectAll("use").attr("href", null);
// draw data layers (no kept in svg)
if (rulers && layerIsOn("toggleRulers")) rulers.draw();
if (layerIsOn("toggleGrid")) drawGrid();
// set options
yearInput.value = options.year;
eraInput.value = options.era;
if (window.restoreDefaultEvents) restoreDefaultEvents();
focusOn(); // based on searchParams focus on point, cell or burg
invokeActiveZooming();
WARN && console.warn(`TOTAL: ${rn((performance.now()-uploadMap.timeStart)/1000,2)}s`);
showStatistics();
INFO && console.groupEnd("Loaded Map " + seed);
tip("Map is successfully loaded", true, "success", 7000);
}
catch(error) {
ERROR && console.error(error);
clearMainTip();
alertMessage.innerHTML = `An error is occured on map loading. Select a different file to load,
<br>generate a new random map or cancel the loading
<p id="errorBox">${parseError(error)}</p>`;
$("#alert").dialog({
resizable: false, title: "Loading error", maxWidth:"50em", buttons: {
"Select file": function() {$(this).dialog("close"); mapToLoad.click();},
"New map": function() {$(this).dialog("close"); regenerateMap();},
Cancel: function() {$(this).dialog("close")}
}, position: {my: "center", at: "center", of: "svg"}
});
}
}
| inlineStyle |
rsn.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
crate::client::protection::Protection,
anyhow::{bail, ensure, format_err},
eapol,
fidl_fuchsia_wlan_common::DriverFeature,
fidl_fuchsia_wlan_mlme::{DeviceInfo, SaeFrame},
fidl_fuchsia_wlan_sme as fidl_sme,
std::boxed::Box,
wlan_common::{bss::BssDescription, ie::rsn::rsne},
wlan_rsn::{
self, auth, nonce::NonceReader, psk, rsna::UpdateSink, Error, NegotiatedProtection,
ProtectionInfo,
},
};
#[derive(Debug)]
pub struct Rsna {
pub negotiated_protection: NegotiatedProtection,
pub supplicant: Box<dyn Supplicant>,
}
impl PartialEq for Rsna {
fn eq(&self, other: &Self) -> bool |
}
pub trait Supplicant: std::fmt::Debug + std::marker::Send {
fn start(&mut self) -> Result<(), Error>;
fn reset(&mut self);
fn on_eapol_frame(
&mut self,
update_sink: &mut UpdateSink,
frame: eapol::Frame<&[u8]>,
) -> Result<(), Error>;
fn on_pmk_available(
&mut self,
update_sink: &mut UpdateSink,
pmk: &[u8],
pmkid: &[u8],
) -> Result<(), Error>;
fn on_sae_handshake_ind(&mut self, update_sink: &mut UpdateSink) -> Result<(), Error>;
fn on_sae_frame_rx(
&mut self,
update_sink: &mut UpdateSink,
frame: SaeFrame,
) -> Result<(), Error>;
fn on_sae_timeout(&mut self, update_sink: &mut UpdateSink, event_id: u64) -> Result<(), Error>;
fn get_auth_cfg(&self) -> &auth::Config;
fn get_auth_method(&self) -> auth::MethodName;
}
impl Supplicant for wlan_rsn::Supplicant {
fn start(&mut self) -> Result<(), Error> {
wlan_rsn::Supplicant::start(self)
}
fn reset(&mut self) {
wlan_rsn::Supplicant::reset(self)
}
fn on_eapol_frame(
&mut self,
update_sink: &mut UpdateSink,
frame: eapol::Frame<&[u8]>,
) -> Result<(), Error> {
wlan_rsn::Supplicant::on_eapol_frame(self, update_sink, frame)
}
fn on_pmk_available(
&mut self,
update_sink: &mut UpdateSink,
pmk: &[u8],
pmkid: &[u8],
) -> Result<(), Error> {
wlan_rsn::Supplicant::on_pmk_available(self, update_sink, pmk, pmkid)
}
fn on_sae_handshake_ind(&mut self, update_sink: &mut UpdateSink) -> Result<(), Error> {
wlan_rsn::Supplicant::on_sae_handshake_ind(self, update_sink)
}
fn on_sae_frame_rx(
&mut self,
update_sink: &mut UpdateSink,
frame: SaeFrame,
) -> Result<(), Error> {
wlan_rsn::Supplicant::on_sae_frame_rx(self, update_sink, frame)
}
fn on_sae_timeout(&mut self, update_sink: &mut UpdateSink, event_id: u64) -> Result<(), Error> {
wlan_rsn::Supplicant::on_sae_timeout(self, update_sink, event_id)
}
fn get_auth_cfg(&self) -> &auth::Config {
&self.auth_cfg
}
fn get_auth_method(&self) -> auth::MethodName {
self.auth_cfg.method_name()
}
}
pub fn get_wpa2_rsna(
device_info: &DeviceInfo,
credential: &fidl_sme::Credential,
bss: &BssDescription,
) -> Result<Protection, anyhow::Error> {
let a_rsne_bytes = match bss.rsne.as_ref() {
None => return Err(format_err!("RSNE not present in BSS")),
Some(rsne) => &rsne[..],
};
// Credentials supplied and BSS is protected.
let (_, a_rsne) = rsne::from_bytes(a_rsne_bytes)
.map_err(|e| format_err!("invalid RSNE {:02x?}: {:?}", a_rsne_bytes, e))?;
let s_rsne = a_rsne.derive_wpa2_s_rsne()?;
let negotiated_protection = NegotiatedProtection::from_rsne(&s_rsne)?;
let psk = compute_psk(credential, &bss.ssid[..])?;
let supplicant = wlan_rsn::Supplicant::new_wpa_personal(
// Note: There should be one Reader per device, not per SME.
// Follow-up with improving on this.
NonceReader::new(&device_info.mac_addr[..])?,
psk,
device_info.mac_addr,
ProtectionInfo::Rsne(s_rsne),
bss.bssid,
ProtectionInfo::Rsne(a_rsne),
)
.map_err(|e| format_err!("failed to create ESS-SA: {:?}", e))?;
Ok(Protection::Rsna(Rsna { negotiated_protection, supplicant: Box::new(supplicant) }))
}
pub fn compute_psk(
credential: &fidl_sme::Credential,
ssid: &[u8],
) -> Result<auth::Config, anyhow::Error> {
match credential {
fidl_sme::Credential::Password(password) => {
psk::compute(&password[..], ssid).map(auth::Config::ComputedPsk)
}
fidl_sme::Credential::Psk(psk) => {
ensure!(psk.len() == 32, "PSK must be 32 octets but was {}", psk.len());
Ok(auth::Config::ComputedPsk(psk.clone().into_boxed_slice()))
}
fidl_sme::Credential::None(..) => bail!("expected credentials but none provided"),
_ => bail!("unsupported credentials configuration for computing PSK"),
}
}
fn get_wpa3_auth_config(
device_info: &DeviceInfo,
password: Vec<u8>,
bss: &BssDescription,
) -> Result<auth::Config, anyhow::Error> {
// Prefer to perform SAE in SME if possible.
let mut selected_feature = None;
for feature in &device_info.driver_features {
match feature {
DriverFeature::SaeSmeAuth => {
selected_feature.replace(feature);
break;
}
DriverFeature::SaeDriverAuth => {
selected_feature.replace(feature);
}
_ => (),
}
}
match selected_feature {
Some(DriverFeature::SaeSmeAuth) => Ok(auth::Config::Sae {
password,
mac: device_info.mac_addr.clone(),
peer_mac: bss.bssid.clone(),
}),
Some(DriverFeature::SaeDriverAuth) => Ok(auth::Config::DriverSae { password }),
_ => Err(format_err!("Could not generate WPA3 auth config -- no SAE driver feature")),
}
}
pub fn get_wpa3_rsna(
device_info: &DeviceInfo,
credential: &fidl_sme::Credential,
bss: &BssDescription,
) -> Result<Protection, anyhow::Error> {
let password = match credential {
fidl_sme::Credential::Password(pwd) => pwd.to_vec(),
_ => bail!("Unexpected credential type"),
};
let a_rsne_bytes = match bss.rsne.as_ref() {
None => return Err(format_err!("RSNE not present in BSS")),
Some(rsne) => &rsne[..],
};
let (_, a_rsne) = rsne::from_bytes(a_rsne_bytes)
.map_err(|e| format_err!("invalid RSNE {:02x?}: {:?}", a_rsne_bytes, e))?;
let s_rsne = a_rsne.derive_wpa3_s_rsne()?;
let negotiated_protection = NegotiatedProtection::from_rsne(&s_rsne)?;
let supplicant = wlan_rsn::Supplicant::new_wpa_personal(
NonceReader::new(&device_info.mac_addr[..])?,
get_wpa3_auth_config(device_info, password, bss)?,
device_info.mac_addr,
ProtectionInfo::Rsne(s_rsne),
bss.bssid,
ProtectionInfo::Rsne(a_rsne),
)
.map_err(|e| format_err!("failed to create ESS-SA: {:?}", e))?;
Ok(Protection::Rsna(Rsna { negotiated_protection, supplicant: Box::new(supplicant) }))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::fake_device_info;
use wlan_common::{assert_variant, fake_bss};
const CLIENT_ADDR: [u8; 6] = [0x7A, 0xE7, 0x76, 0xD9, 0xF2, 0x67];
#[test]
fn test_get_rsna_password_for_unprotected_network() {
let bss = fake_bss!(Open);
let credential = fidl_sme::Credential::Password("somepass".as_bytes().to_vec());
let rsna = get_wpa2_rsna(&fake_device_info(CLIENT_ADDR), &credential, &bss);
assert!(rsna.is_err(), "expect error when password is supplied for unprotected network")
}
#[test]
fn test_get_rsna_no_password_for_protected_network() {
let bss = fake_bss!(Wpa2);
let credential = fidl_sme::Credential::None(fidl_sme::Empty);
let rsna = get_wpa2_rsna(&fake_device_info(CLIENT_ADDR), &credential, &bss);
assert!(rsna.is_err(), "expect error when no password is supplied for protected network")
}
#[test]
fn test_get_rsna_psk() {
let bss = fake_bss!(Wpa2);
let credential = fidl_sme::Credential::Psk(vec![0xAA; 32]);
get_wpa2_rsna(&fake_device_info(CLIENT_ADDR), &credential, &bss)
.expect("expected successful RSNA with valid PSK");
}
#[test]
fn test_wpa2_get_auth_method() {
let bss = fake_bss!(Wpa2);
let credential = fidl_sme::Credential::Psk(vec![0xAA; 32]);
let protection = get_wpa2_rsna(&fake_device_info(CLIENT_ADDR), &credential, &bss)
.expect("expected successful RSNA with valid PSK");
assert_variant!(protection, Protection::Rsna(rsna) => {
assert_eq!(rsna.supplicant.get_auth_method(), auth::MethodName::Psk);
});
}
#[test]
fn test_wpa2_get_auth_config() {
let bss = fake_bss!(Wpa2);
let credential = fidl_sme::Credential::Psk(vec![0xAA; 32]);
let protection = get_wpa2_rsna(&fake_device_info(CLIENT_ADDR), &credential, &bss)
.expect("expected successful RSNA with valid PSK");
assert_variant!(protection, Protection::Rsna(rsna) => {
assert_variant!(rsna.supplicant.get_auth_cfg(), auth::Config::ComputedPsk(_));
});
}
#[test]
fn test_get_rsna_invalid_psk() {
let bss = fake_bss!(Wpa2);
// PSK too short
let credential = fidl_sme::Credential::Psk(vec![0xAA; 31]);
get_wpa2_rsna(&fake_device_info(CLIENT_ADDR), &credential, &bss)
.expect_err("expected RSNA failure with invalid PSK");
}
#[test]
fn test_get_rsna_wpa3() {
let bss = fake_bss!(Wpa3);
let credential = fidl_sme::Credential::Password(vec![0xBB; 8]);
get_wpa3_rsna(&fake_device_info(CLIENT_ADDR), &credential, &bss)
.expect("expected successful SAE RSNA with valid credential");
}
#[test]
fn test_wpa3_get_auth_method() {
let bss = fake_bss!(Wpa3);
let credential = fidl_sme::Credential::Password(vec![0xBB; 8]);
let protection = get_wpa3_rsna(&fake_device_info(CLIENT_ADDR), &credential, &bss)
.expect("expected successful SAE RSNA with valid credential");
assert_variant!(protection, Protection::Rsna(rsna) => {
assert_eq!(rsna.supplicant.get_auth_method(), auth::MethodName::Sae);
});
}
#[test]
fn test_wpa3_get_auth_config() {
let bss = fake_bss!(Wpa3);
let credential = fidl_sme::Credential::Password(vec![0xBB; 8]);
let protection = get_wpa3_rsna(&fake_device_info(CLIENT_ADDR), &credential, &bss)
.expect("expected successful SAE RSNA with valid credential");
assert_variant!(protection, Protection::Rsna(rsna) => {
assert_variant!(rsna.supplicant.get_auth_cfg(), auth::Config::Sae { .. });
});
}
#[test]
fn test_get_rsna_wpa3_psk_fails() {
let bss = fake_bss!(Wpa3);
let credential = fidl_sme::Credential::Psk(vec![0xAA; 32]);
get_wpa3_rsna(&fake_device_info(CLIENT_ADDR), &credential, &bss)
.expect_err("expected WPA3 RSNA failure with PSK");
}
#[test]
fn test_wpa3_sme_auth_config() {
let bss = fake_bss!(Wpa3);
let mut device_info = fake_device_info([0xaa; 6]);
device_info.driver_features = vec![fidl_fuchsia_wlan_common::DriverFeature::SaeSmeAuth];
let auth_config = get_wpa3_auth_config(&device_info, vec![0xbb; 8], &bss)
.expect("Failed to create auth config");
assert_variant!(auth_config,
auth::Config::Sae { password, .. } => assert_eq!(password, vec![0xbb; 8]));
}
#[test]
fn test_wpa3_driver_sme_auth_config() {
let bss = fake_bss!(Wpa3);
let mut device_info = fake_device_info([0xaa; 6]);
device_info.driver_features = vec![fidl_fuchsia_wlan_common::DriverFeature::SaeDriverAuth];
let auth_config = get_wpa3_auth_config(&device_info, vec![0xbb; 8], &bss)
.expect("Failed to create auth config");
assert_variant!(auth_config,
auth::Config::DriverSae { password } => assert_eq!(password, vec![0xbb; 8]));
}
#[test]
fn test_wpa3_sme_auth_config_preferred() {
let bss = fake_bss!(Wpa3);
let mut device_info = fake_device_info([0xaa; 6]);
device_info.driver_features = vec![
fidl_fuchsia_wlan_common::DriverFeature::SaeSmeAuth,
fidl_fuchsia_wlan_common::DriverFeature::SaeDriverAuth,
];
let auth_config = get_wpa3_auth_config(&device_info, vec![0xbb; 8], &bss)
.expect("Failed to create auth config");
assert_variant!(auth_config,
auth::Config::Sae { password, .. } => assert_eq!(password, vec![0xbb; 8]));
}
#[test]
fn test_wpa3_invalid_auth_config() {
let bss = fake_bss!(Wpa3);
let mut device_info = fake_device_info([0xaa; 6]);
device_info.driver_features = vec![];
get_wpa3_auth_config(&device_info, vec![0xbb; 8], &bss)
.expect_err("Should not create auth config");
}
}
| {
self.negotiated_protection == other.negotiated_protection
} |
action.go | package remote
import (
"context"
"encoding/hex"
"fmt"
"os"
"path"
"runtime"
"sort"
"strings"
"time"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/chunker"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/digest"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/filemetadata"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/tree"
pb "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
"github.com/golang/protobuf/ptypes"
"github.com/thought-machine/please/src/core"
"github.com/thought-machine/please/src/fs"
)
// uploadAction uploads a build action for a target and returns its digest.
func (c *Client) uploadAction(target *core.BuildTarget, isTest, isRun bool) (*pb.Command, *pb.Digest, error) {
var command *pb.Command
var digest *pb.Digest
err := c.uploadBlobs(func(ch chan<- *chunker.Chunker) error {
defer close(ch)
inputRoot, err := c.uploadInputs(ch, target, isTest || isRun)
if err != nil {
return err
}
inputRootChunker, _ := chunker.NewFromProto(inputRoot, int(c.client.ChunkMaxSize))
ch <- inputRootChunker
command, err = c.buildCommand(target, inputRoot, isTest, isRun, target.Stamp)
if err != nil {
return err
}
commandChunker, _ := chunker.NewFromProto(command, int(c.client.ChunkMaxSize))
ch <- commandChunker
actionChunker, _ := chunker.NewFromProto(&pb.Action{
CommandDigest: commandChunker.Digest().ToProto(),
InputRootDigest: inputRootChunker.Digest().ToProto(),
Timeout: ptypes.DurationProto(timeout(target, isTest)),
}, int(c.client.ChunkMaxSize))
ch <- actionChunker
digest = actionChunker.Digest().ToProto()
return nil
})
return command, digest, err
}
// buildAction creates a build action for a target and returns the command and the action digest. No uploading is done.
func (c *Client) buildAction(target *core.BuildTarget, isTest, stamp bool) (*pb.Command, *pb.Digest, error) {
inputRoot, err := c.uploadInputs(nil, target, isTest)
if err != nil {
return nil, nil, err
}
inputRootDigest := c.digestMessage(inputRoot)
command, err := c.buildCommand(target, inputRoot, isTest, false, stamp)
if err != nil {
return nil, nil, err
}
commandDigest := c.digestMessage(command)
actionDigest := c.digestMessage(&pb.Action{
CommandDigest: commandDigest,
InputRootDigest: inputRootDigest,
Timeout: ptypes.DurationProto(timeout(target, isTest)),
})
return command, actionDigest, nil
}
// buildCommand builds the command for a single target.
func (c *Client) buildCommand(target *core.BuildTarget, inputRoot *pb.Directory, isTest, isRun, stamp bool) (*pb.Command, error) {
if isTest {
return c.buildTestCommand(target)
} else if isRun {
return c.buildRunCommand(target)
}
// We can't predict what variables like this should be so we sneakily bung something on
// the front of the command. It'd be nicer if there were a better way though...
var commandPrefix = "export TMP_DIR=\"`pwd`\" && "
// TODO(peterebden): Remove this nonsense once API v2.1 is released.
files, dirs := outputs(target)
if len(target.Outputs()) == 1 { // $OUT is relative when running remotely; make it absolute
commandPrefix += `export OUT="$TMP_DIR/$OUT" && `
}
if target.IsRemoteFile {
// Synthesize something for the Command proto. We never execute this, but it does get hashed for caching
// purposes so it's useful to have it be a minimal expression of what we care about (for example, it should
// not include the environment variables since we don't communicate those to the remote server).
return &pb.Command{
Arguments: []string{
"fetch", strings.Join(target.AllURLs(c.state.Config), " "), "verify", strings.Join(target.Hashes, " "),
},
OutputFiles: files,
OutputDirectories: dirs,
OutputPaths: append(files, dirs...),
}, nil
}
cmd := target.GetCommand(c.state)
if cmd == "" {
cmd = "true"
}
cmd, err := core.ReplaceSequences(c.state, target, cmd)
return &pb.Command{
Platform: c.platform,
// We have to run everything through bash since our commands are arbitrary.
// Unfortunately we can't just say "bash", we need an absolute path which is
// a bit weird since it assumes that our absolute path is the same as the
// remote one (which is probably OK on the same OS, but not between say Linux and
// FreeBSD where bash is not idiomatically in the same place).
Arguments: []string{
c.bashPath, "--noprofile", "--norc", "-u", "-o", "pipefail", "-c", commandPrefix + cmd,
},
EnvironmentVariables: c.buildEnv(target, c.stampedBuildEnvironment(target, inputRoot, stamp), target.Sandbox),
OutputFiles: files,
OutputDirectories: dirs,
OutputPaths: append(files, dirs...),
}, err
}
// stampedBuildEnvironment returns a build environment, optionally with a stamp if stamp is true.
func (c *Client) stampedBuildEnvironment(target *core.BuildTarget, inputRoot *pb.Directory, stamp bool) []string {
if target.IsFilegroup {
return core.GeneralBuildEnvironment(c.state.Config) // filegroups don't need a full build environment
} else if !stamp {
return core.BuildEnvironment(c.state, target, ".")
}
// We generate the stamp ourselves from the input root.
// TODO(peterebden): it should include the target properties too...
hash := c.sum(mustMarshal(inputRoot))
return core.StampedBuildEnvironment(c.state, target, hash, ".")
}
// buildTestCommand builds a command for a target when testing.
func (c *Client) buildTestCommand(target *core.BuildTarget) (*pb.Command, error) {
// TODO(peterebden): Remove all this nonsense once API v2.1 is released.
files := make([]string, 0, 2)
dirs := []string{}
if target.NeedCoverage(c.state) {
files = append(files, core.CoverageFile)
}
if !target.NoTestOutput {
if target.HasLabel(core.TestResultsDirLabel) {
dirs = []string{core.TestResultsFile}
} else {
files = append(files, core.TestResultsFile)
}
}
const commandPrefix = "export TMP_DIR=\"`pwd`\" TEST_DIR=\"`pwd`\" && "
cmd, err := core.ReplaceTestSequences(c.state, target, target.GetTestCommand(c.state))
if len(c.state.TestArgs) != 0 {
cmd += " " + strings.Join(c.state.TestArgs, " ")
}
return &pb.Command{
Platform: &pb.Platform{
Properties: []*pb.Platform_Property{
{
Name: "OSFamily",
Value: translateOS(target.Subrepo),
},
},
},
Arguments: []string{
c.bashPath, "--noprofile", "--norc", "-u", "-o", "pipefail", "-c", commandPrefix + cmd,
},
EnvironmentVariables: c.buildEnv(nil, core.TestEnvironment(c.state, target, "."), target.TestSandbox),
OutputFiles: files,
OutputDirectories: dirs,
OutputPaths: append(files, dirs...),
}, err
}
// buildRunCommand builds the command to run a target remotely.
func (c *Client) buildRunCommand(target *core.BuildTarget) (*pb.Command, error) {
outs := target.Outputs()
if len(outs) == 0 {
return nil, fmt.Errorf("Target %s has no outputs, it can't be run with `plz run`", target)
}
return &pb.Command{
Platform: c.platform,
Arguments: outs,
EnvironmentVariables: c.buildEnv(target, core.GeneralBuildEnvironment(c.state.Config), false),
}, nil
}
// uploadInputs finds and uploads a set of inputs from a target.
func (c *Client) uploadInputs(ch chan<- *chunker.Chunker, target *core.BuildTarget, isTest bool) (*pb.Directory, error) {
if target.IsRemoteFile {
return &pb.Directory{}, nil
}
b, err := c.uploadInputDir(ch, target, isTest)
if err != nil {
return nil, err
}
return b.Root(ch), nil
}
func (c *Client) uploadInputDir(ch chan<- *chunker.Chunker, target *core.BuildTarget, isTest bool) (*dirBuilder, error) {
b := newDirBuilder(c)
for input := range c.iterInputs(target, isTest, target.IsFilegroup) {
if l := input.Label(); l != nil {
o := c.targetOutputs(*l)
if o == nil {
if dep := c.state.Graph.TargetOrDie(*l); dep.Local {
// We have built this locally, need to upload its outputs
if err := c.uploadLocalTarget(dep); err != nil {
return nil, err
}
o = c.targetOutputs(*l)
} else {
// Classic "we shouldn't get here" stuff
return nil, fmt.Errorf("Outputs not known for %s (should be built by now)", *l)
}
}
pkgName := l.PackageName
if target.IsFilegroup {
pkgName = target.Label.PackageName
} else if isTest && *l == target.Label {
// At test time the target itself is put at the root rather than in the normal dir.
// This is just How Things Are, so mimic it here.
pkgName = "."
}
// Recall that (as noted in setOutputs) these can have full paths on them, which
// we now need to sort out again to create well-formed Directory protos.
for _, f := range o.Files {
d := b.Dir(path.Join(pkgName, path.Dir(f.Name)))
d.Files = append(d.Files, &pb.FileNode{
Name: path.Base(f.Name),
Digest: f.Digest,
IsExecutable: f.IsExecutable,
})
}
for _, d := range o.Directories {
dir := b.Dir(path.Join(pkgName, path.Dir(d.Name)))
dir.Directories = append(dir.Directories, &pb.DirectoryNode{
Name: path.Base(d.Name),
Digest: d.Digest,
})
if target.IsFilegroup {
if err := c.addChildDirs(b, path.Join(pkgName, d.Name), d.Digest); err != nil {
return b, err
}
}
}
for _, s := range o.Symlinks {
d := b.Dir(path.Join(pkgName, path.Dir(s.Name)))
d.Symlinks = append(d.Symlinks, &pb.SymlinkNode{
Name: path.Base(s.Name),
Target: s.Target,
})
}
continue
}
if err := c.uploadInput(b, ch, input); err != nil {
return nil, err
}
}
if !isTest && target.Stamp {
stamp := core.StampFile(target)
chomk := chunker.NewFromBlob(stamp, int(c.client.ChunkMaxSize))
if ch != nil |
d := b.Dir(".")
d.Files = append(d.Files, &pb.FileNode{
Name: target.StampFileName(),
Digest: chomk.Digest().ToProto(),
})
}
return b, nil
}
// addChildDirs adds a set of child directories to a builder.
func (c *Client) addChildDirs(b *dirBuilder, name string, dg *pb.Digest) error {
dir := &pb.Directory{}
if err := c.client.ReadProto(context.Background(), digest.NewFromProtoUnvalidated(dg), dir); err != nil {
return err
}
d := b.Dir(name)
d.Directories = append(d.Directories, dir.Directories...)
d.Files = append(d.Files, dir.Files...)
d.Symlinks = append(d.Symlinks, dir.Symlinks...)
for _, subdir := range dir.Directories {
if err := c.addChildDirs(b, path.Join(name, subdir.Name), subdir.Digest); err != nil {
return err
}
}
return nil
}
// uploadInput finds and uploads a single input.
func (c *Client) uploadInput(b *dirBuilder, ch chan<- *chunker.Chunker, input core.BuildInput) error {
if _, ok := input.(core.SystemPathLabel); ok {
return nil // Don't need to upload things off the system (the remote is expected to have them)
}
fullPaths := input.FullPaths(c.state.Graph)
for i, out := range input.Paths(c.state.Graph) {
in := fullPaths[i]
if err := fs.Walk(in, func(name string, isDir bool) error {
if isDir {
return nil // nothing to do
}
dest := path.Join(out, name[len(in):])
d := b.Dir(path.Dir(dest))
// Now handle the file itself
info, err := os.Lstat(name)
if err != nil {
return err
}
if info.Mode()&os.ModeSymlink != 0 {
link, err := os.Readlink(name)
if err != nil {
return err
}
d.Symlinks = append(d.Symlinks, &pb.SymlinkNode{
Name: path.Base(dest),
Target: link,
})
return nil
}
h, err := c.state.PathHasher.Hash(name, false, true)
if err != nil {
return err
}
dg := &pb.Digest{
Hash: hex.EncodeToString(h),
SizeBytes: info.Size(),
}
d.Files = append(d.Files, &pb.FileNode{
Name: path.Base(dest),
Digest: dg,
IsExecutable: info.Mode()&0100 != 0,
})
if ch != nil {
ch <- chunker.NewFromFile(name, digest.NewFromProtoUnvalidated(dg), int(c.client.ChunkMaxSize))
}
return nil
}); err != nil {
return err
}
}
return nil
}
// iterInputs yields all the input files needed for a target.
func (c *Client) iterInputs(target *core.BuildTarget, isTest, isFilegroup bool) <-chan core.BuildInput {
if !isTest {
return core.IterInputs(c.state.Graph, target, true, isFilegroup)
}
ch := make(chan core.BuildInput)
go func() {
ch <- target.Label
for _, datum := range target.AllData() {
ch <- datum
}
close(ch)
}()
return ch
}
// buildMetadata converts an ActionResult into one of our BuildMetadata protos.
// N.B. this always returns a non-nil metadata object for the first response.
func (c *Client) buildMetadata(ar *pb.ActionResult, needStdout, needStderr bool) (*core.BuildMetadata, error) {
metadata := &core.BuildMetadata{
Stdout: ar.StdoutRaw,
Stderr: ar.StderrRaw,
}
if needStdout && len(metadata.Stdout) == 0 && ar.StdoutDigest != nil {
b, err := c.client.ReadBlob(context.Background(), digest.NewFromProtoUnvalidated(ar.StdoutDigest))
if err != nil {
return metadata, err
}
metadata.Stdout = b
}
if needStderr && len(metadata.Stderr) == 0 && ar.StderrDigest != nil {
b, err := c.client.ReadBlob(context.Background(), digest.NewFromProtoUnvalidated(ar.StderrDigest))
if err != nil {
return metadata, err
}
metadata.Stderr = b
}
return metadata, nil
}
// digestForFilename returns the digest for an output of the given name, or nil if it doesn't exist.
func (c *Client) digestForFilename(ar *pb.ActionResult, name string) *pb.Digest {
for _, file := range ar.OutputFiles {
if file.Path == name {
return file.Digest
}
}
return nil
}
// downloadAllFiles returns the contents of all files in the given action result
func (c *Client) downloadAllPrefixedFiles(ar *pb.ActionResult, prefix string) ([][]byte, error) {
outs, err := c.client.FlattenActionOutputs(context.Background(), ar)
if err != nil {
return nil, err
}
ret := [][]byte{}
for name, out := range outs {
if strings.HasPrefix(name, prefix) {
blob, err := c.client.ReadBlob(context.Background(), out.Digest)
if err != nil {
return nil, err
}
ret = append(ret, blob)
}
}
return ret, nil
}
// verifyActionResult verifies that all the requested outputs actually exist in a returned
// ActionResult. Servers do not necessarily verify this but we need to make sure they are
// complete for future requests.
func (c *Client) verifyActionResult(target *core.BuildTarget, command *pb.Command, actionDigest *pb.Digest, ar *pb.ActionResult, verifyOutputs bool) error {
outs := make(map[string]bool, len(ar.OutputFiles)+len(ar.OutputDirectories)+len(ar.OutputFileSymlinks)+len(ar.OutputDirectorySymlinks))
for _, f := range ar.OutputFiles {
outs[f.Path] = true
}
for _, f := range ar.OutputDirectories {
outs[f.Path] = true
}
for _, f := range ar.OutputFileSymlinks {
outs[f.Path] = true
}
for _, f := range ar.OutputDirectorySymlinks {
outs[f.Path] = true
}
for _, out := range command.OutputFiles {
if !outs[out] {
return fmt.Errorf("Remote build action for %s failed to produce output %s%s", target, out, c.actionURL(actionDigest, true))
}
}
for _, out := range command.OutputDirectories {
if !outs[out] {
return fmt.Errorf("Remote build action for %s failed to produce output %s%s", target, out, c.actionURL(actionDigest, true))
}
}
if !verifyOutputs {
return nil
}
start := time.Now()
// Do more in-depth validation that blobs exist remotely.
outputs, err := c.client.FlattenActionOutputs(context.Background(), ar)
if err != nil {
return fmt.Errorf("Failed to verify action result: %s", err)
}
// At this point it's verified all the directories, but not the files themselves.
digests := make([]digest.Digest, 0, len(outputs))
for _, output := range outputs {
// FlattenTree doesn't populate the digest in for empty dirs... we don't need to check them anyway
if !output.IsEmptyDirectory {
digests = append(digests, output.Digest)
}
}
if missing, err := c.client.MissingBlobs(context.Background(), digests); err != nil {
return fmt.Errorf("Failed to verify action result outputs: %s", err)
} else if len(missing) != 0 {
return fmt.Errorf("Action result missing %d blobs", len(missing))
}
log.Debug("Verified action result for %s in %s", target, time.Since(start))
return nil
}
// uploadLocalTarget uploads the outputs of a target that was built locally.
func (c *Client) uploadLocalTarget(target *core.BuildTarget) error {
m, ar, err := tree.ComputeOutputsToUpload(target.OutDir(), target.Outputs(), int(c.client.ChunkMaxSize), filemetadata.NewNoopCache())
if err != nil {
return err
}
chomks := make([]*chunker.Chunker, 0, len(m))
for _, c := range m {
chomks = append(chomks, c)
}
if err := c.client.UploadIfMissing(context.Background(), chomks...); err != nil {
return err
}
return c.setOutputs(target, ar)
}
// translateOS converts the OS name of a subrepo into a Bazel-style OS name.
func translateOS(subrepo *core.Subrepo) string {
if subrepo == nil {
return reallyTranslateOS(runtime.GOOS)
}
return reallyTranslateOS(subrepo.Arch.OS)
}
func reallyTranslateOS(os string) string {
switch os {
case "darwin":
return "macos"
default:
return os
}
}
// buildEnv translates the set of environment variables for this target to a proto.
func (c *Client) buildEnv(target *core.BuildTarget, env []string, sandbox bool) []*pb.Command_EnvironmentVariable {
if sandbox {
env = append(env, "SANDBOX=true")
}
if target != nil {
if target.IsBinary {
env = append(env, "_BINARY=true")
}
}
sort.Strings(env) // Proto says it must be sorted (not just consistently ordered :( )
vars := make([]*pb.Command_EnvironmentVariable, len(env))
for i, e := range env {
idx := strings.IndexByte(e, '=')
vars[i] = &pb.Command_EnvironmentVariable{
Name: e[:idx],
Value: e[idx+1:],
}
}
return vars
}
| {
ch <- chomk
} |
docClient.js | (function () {
'use strict';
var gulp = require('gulp');
var jsdoc = require('gulp-jsdoc3');
var path = require('path');
var baseDir = '../../../';
var currentPath = path.join(__dirname, baseDir);
var getPath = function (_path) {
return currentPath + _path;
};
var docClient = function () {
gulp.task('doc-client', function () {
return gulp.src(['!' + getPath('app/src/client/lib/**'), getPath('README.md'), getPath('app/src/client/**/*.js')], {read: false})
.pipe(jsdoc({
opts: {
destination: getPath('app/build/client/doc/client')
}
})); |
gulp.task('doc-client-watch', function () {
gulp.watch(['!' + getPath('app/src/client/lib/**'), getPath('README.md'), getPath('app/src/client/**/*.js')], ['doc-client']);
});
};
module.exports = docClient;
})(); | }); |
Migration029_test.go | package migrations_test
import (
"database/sql"
"io/ioutil"
"testing"
"github.com/OpenBazaar/openbazaar-go/repo/migrations"
"github.com/OpenBazaar/openbazaar-go/schema"
)
var stmt = `PRAGMA key = 'letmein';
create table sales (orderID text primary key not null,
contract blob, state integer, read integer,
timestamp integer, total integer, thumbnail text,
buyerID text, buyerHandle text, title text,
shippingName text, shippingAddress text,
paymentAddr text, funded integer, transactions blob,
needsSync integer, lastDisputeTimeoutNotifiedAt integer not null default 0,
coinType not null default '', paymentCoin not null default '');
create table purchases (orderID text primary key not null,
contract blob, state integer, read integer,
timestamp integer, total integer, thumbnail text,
vendorID text, vendorHandle text, title text,
shippingName text, shippingAddress text, paymentAddr text,
funded integer, transactions blob,
lastDisputeTimeoutNotifiedAt integer not null default 0,
lastDisputeExpiryNotifiedAt integer not null default 0,
disputedAt integer not null default 0, coinType not null default '',
paymentCoin not null default '');
create table inventory (invID text primary key not null, slug text, variantIndex integer, count integer);`
func TestMigration029(t *testing.T) {
basePath := schema.GenerateTempPath()
appSchema, err := schema.NewCustomSchemaManager(schema.SchemaContext{DataPath: basePath, TestModeEnabled: true})
if err != nil {
t.Fatal(err)
}
if err = appSchema.BuildSchemaDirectories(); err != nil {
t.Fatal(err)
}
defer appSchema.DestroySchemaDirectories()
var dbPath = appSchema.DataPathJoin("datastore", "mainnet.db")
db, err := sql.Open("sqlite3", dbPath)
if err != nil {
t.Fatal(err)
}
if _, err := db.Exec(stmt); err != nil {
t.Fatal(err)
}
if _, err := db.Exec("INSERT INTO sales (orderID, total) values (?,?)", "asdf", 3); err != nil {
t.Fatal(err)
}
if _, err := db.Exec("INSERT INTO purchases (orderID, total) values (?,?)", "asdf", 3); err != nil {
t.Fatal(err)
}
if _, err := db.Exec("INSERT INTO inventory (invID, count) values (?,?)", "asdf", "3"); err != nil {
t.Fatal(err)
}
var m migrations.Migration029
if err := m.Up(basePath, "letmein", false); err != nil {
t.Fatal(err)
}
var (
orderID string
total string
total1 int
invID string
count string
count1 int
)
r := db.QueryRow("select orderID, total from sales where orderID=?", "asdf")
if err := r.Scan(&orderID, &total); err != nil {
t.Error(err)
}
if total != "3" {
t.Errorf("expected total to be 3, but was %s", total)
}
r = db.QueryRow("select orderID, total from purchases where orderID=?", "asdf")
if err := r.Scan(&orderID, &total); err != nil {
t.Error(err)
}
if total != "3" { | }
r = db.QueryRow("select invID, count from inventory where invID=?", "asdf")
if err := r.Scan(&invID, &count); err != nil {
t.Error(err)
}
if count != "3" {
t.Errorf("expected count to be 3, but was %s", total)
}
repoVer, err := ioutil.ReadFile(appSchema.DataPathJoin("repover"))
if err != nil {
t.Error(err)
}
if string(repoVer) != "30" {
t.Error("Failed to write new repo version")
}
err = m.Down(basePath, "letmein", false)
if err != nil {
t.Fatal(err)
}
r = db.QueryRow("select orderID, total from sales where orderID=?", "asdf")
if err := r.Scan(&orderID, &total1); err != nil {
t.Error(err)
}
if total1 != 3 {
t.Errorf("expected total to be 3, but was %d", total1)
}
r = db.QueryRow("select orderID, total from purchases where orderID=?", "asdf")
if err := r.Scan(&orderID, &total1); err != nil {
t.Error(err)
}
if total1 != 3 {
t.Errorf("expected total to be 3, but was %d", total1)
}
r = db.QueryRow("select invID, count from inventory where invID=?", "asdf")
if err := r.Scan(&invID, &count1); err != nil {
t.Error(err)
}
if count1 != 3 {
t.Errorf("expected count to be 3, but was %d", total1)
}
repoVer, err = ioutil.ReadFile(appSchema.DataPathJoin("repover"))
if err != nil {
t.Error(err)
}
if string(repoVer) != "29" {
t.Error("Failed to write old repo version")
}
} | t.Errorf("expected total to be 3, but was %s", total) |
items.go | package model
| Reason string `json:"reason"`
} | type DeploymentStatus struct {
Name string `json:"name"`
Completed bool `json:"completed"`
Status string `json:"status"` |
owner.rs | use diesel::pg::Pg;
use diesel::prelude::*;
use crate::app::App;
use crate::util::errors::{cargo_err, AppResult};
use crate::models::{Crate, Team, User};
use crate::schema::{crate_owners, users};
#[derive(Insertable, Associations, Identifiable, Debug, Clone, Copy)]
#[belongs_to(Crate)]
#[belongs_to(User, foreign_key = "owner_id")]
#[belongs_to(Team, foreign_key = "owner_id")]
#[table_name = "crate_owners"]
#[primary_key(crate_id, owner_id, owner_kind)]
pub struct CrateOwner {
pub crate_id: i32,
pub owner_id: i32,
pub created_by: i32,
pub owner_kind: i32,
pub email_notifications: bool,
}
type BoxedQuery<'a> = crate_owners::BoxedQuery<'a, Pg, crate_owners::SqlType>;
impl CrateOwner { |
crate_owners
.filter(deleted.eq(false))
.filter(owner_kind.eq(kind as i32))
.into_boxed()
}
}
#[derive(Debug, Clone, Copy)]
#[repr(u32)]
pub enum OwnerKind {
User = 0,
Team = 1,
}
/// Unifies the notion of a User or a Team.
#[derive(Debug)]
pub enum Owner {
User(User),
Team(Team),
}
impl Owner {
/// Finds the owner by name. Always recreates teams to get the most
/// up-to-date GitHub ID. Fails out if the user isn't found in the
/// database, the team isn't found on GitHub, or if the user isn't a member
/// of the team on GitHub.
/// May be a user's GH login or a full team name. This is case
/// sensitive.
pub fn find_or_create_by_login(
app: &App,
conn: &PgConnection,
req_user: &User,
name: &str,
) -> AppResult<Owner> {
if name.contains(':') {
Ok(Owner::Team(Team::create_or_update(
app, conn, name, req_user,
)?))
} else {
users::table
.filter(crate::lower(users::gh_login).eq(name.to_lowercase()))
.filter(users::gh_id.ne(-1))
.order(users::gh_id.desc())
.first(conn)
.map(Owner::User)
.map_err(|_| cargo_err(&format_args!("could not find user with login `{}`", name)))
}
}
pub fn kind(&self) -> i32 {
match *self {
Owner::User(_) => OwnerKind::User as i32,
Owner::Team(_) => OwnerKind::Team as i32,
}
}
pub fn login(&self) -> &str {
match *self {
Owner::User(ref user) => &user.gh_login,
Owner::Team(ref team) => &team.login,
}
}
pub fn id(&self) -> i32 {
match *self {
Owner::User(ref user) => user.id,
Owner::Team(ref team) => team.id,
}
}
} | /// Returns a base crate owner query filtered by the owner kind argument. This query also
/// filters out deleted records.
pub fn by_owner_kind(kind: OwnerKind) -> BoxedQuery<'static> {
use self::crate_owners::dsl::*; |
platformer.rs | use bevy::prelude::*;
use bevy_physimple::prelude::*;
#[derive(Default, Component)]
pub struct Player {
double_jump: bool,
on_wall: Option<Vec2>,
on_floor: bool
}
pub struct Gravity(Vec2);
fn main() {
let mut app = App::new();
app // Basic setup of the app
.insert_resource(WindowDescriptor {
title: "A cool name for an example".to_string(),
..Default::default()
})
.add_plugins(DefaultPlugins)
.add_plugin(Physics2dPlugin)
.add_system(bevy::input::system::exit_on_esc_system.system())
;
app // startup systems
.add_startup_system(setup.system())
;
app // systems
.add_system(controller_on_stuff.system())
.add_system(character_system.system())
.add_system(change_sensor_color.system())
.add_system(gravity.system())
.add_system(ray_head.system())
;
app.run();
}
fn setup(
mut coms: Commands,
a_server: Res<AssetServer>,
) {
let wall = Color::BLACK;
// insert a gravity struct
coms.insert_resource(Gravity(Vec2::new(0.0,-540.0)));
// Spawn the damn camera
coms.spawn_bundle(OrthographicCameraBundle::new_2d());
// Controls
let style = TextStyle {
font: a_server.load("fonts/FiraSans-Bold.ttf"),
font_size: 32.0,
color: Color::ANTIQUE_WHITE,
};
let alignment = TextAlignment {
vertical: VerticalAlign::Bottom,
horizontal: HorizontalAlign::Left,
};
let text = "A/D - Movement\nSpace/W - Jump/Double jump\nS - Stomp(when mid air)";
coms
.spawn_bundle(Text2dBundle {
text: Text::with_section(text, style, alignment),
transform: Transform::from_xyz(-270.0, 360.0, 0.0),
..Default::default()
})
;
// Spawn character
coms
.spawn_bundle(SpriteBundle {
sprite: Sprite {
custom_size: Some(Vec2::splat(28.0)),
color: Color::ALICE_BLUE,
..Default::default()
},
..Default::default()
})
.insert_bundle(KinematicBundle {
shape: CollisionShape::Square(Square::size(Vec2::splat(28.0))),
..Default::default()
})
.insert(Player::default())
.insert(
RayCast::new(Vec2::new(100.0,0.0))
.with_offset(Vec2::new(14.0,0.0)) // Gonna offset our ray
.with_static(true) // Let it collide with static bodies
)
.with_children(|p| {
// We gonna push a little cube for the ray's head
p.spawn_bundle(SpriteBundle {
sprite: Sprite {
custom_size: Some(Vec2::splat(10.0)),
color: Color::MIDNIGHT_BLUE,
..Default::default()
},
..Default::default()
});
})
;
// center floor
coms
.spawn_bundle(SpriteBundle {
sprite: Sprite {
custom_size: Some(Vec2::new(600.0, 30.0)),
color: wall,
..Default::default()
},
transform: Transform::from_xyz(150.0, -200.0, 0.0),
..Default::default()
})
.insert_bundle(StaticBundle {
shape: CollisionShape::Square(Square::size(Vec2::new(600.0, 30.0))),
..Default::default()
})
;
// side wall
coms
.spawn_bundle(SpriteBundle {
sprite: Sprite {
custom_size: Some(Vec2::new(40.0, 300.0)),
color: wall,
..Default::default()
},
transform: {
let mut t = Transform::from_xyz(450.0, 0.0, 0.0);
t.rotation = Quat::from_rotation_z(-0.1 * 3.14);
t
},
..Default::default()
})
.insert_bundle(StaticBundle {
shape: CollisionShape::Square(Square::size(Vec2::new(40.0, 300.0))),
..Default::default()
})
;
// smaller other side wall
coms
.spawn_bundle(SpriteBundle {
sprite: Sprite {
custom_size: Some(Vec2::new(30.0, 90.0)),
color: wall,
..Default::default()
},
transform: Transform::from_xyz(-150.0, -160.0, 0.0),
..Default::default()
})
.insert_bundle(StaticBundle {
shape: CollisionShape::Square(Square::size(Vec2::new(30.0,90.0))),
..Default::default()
})
;
// Floating platform
coms
.spawn_bundle(SpriteBundle {
sprite: Sprite {
custom_size: Some(Vec2::new(200.0,30.0)),
color: wall,
..Default::default()
},
transform: Transform::from_xyz(-150.0, 0.0,0.0),
..Default::default()
})
.insert_bundle(StaticBundle {
shape: CollisionShape::Square(Square::size(Vec2::new(200.0, 30.0))),
..Default::default()
})
;
// Spawn the sensor
const SENSOR_SIZE: f32 = 50.0;
coms
.spawn_bundle(SpriteBundle {
sprite: Sprite {
custom_size: Some(Vec2::splat(SENSOR_SIZE)),
color: Color::GOLD,
..Default::default()
},
transform: Transform::from_xyz(30.0, -150.0, 0.0),
..Default::default()
})
.insert_bundle(SensorBundle {
shape: CollisionShape::Square(Square::size(Vec2::splat(SENSOR_SIZE))),
..Default::default()
});
// Spawn another cube which we will try to push or something
const CUBE_SIZE: f32 = 35.0;
coms
.spawn_bundle(SpriteBundle {
sprite: Sprite {
custom_size: Some(Vec2::splat(CUBE_SIZE)),
color: Color::CRIMSON,
..Default::default()
},
transform: Transform::from_xyz(100.0,0.0,0.0),
..Default::default()
})
.insert_bundle(KinematicBundle {
shape: CollisionShape::Square(Square::size(Vec2::splat(CUBE_SIZE))),
..Default::default()
})
;
}
fn gravity(
time: Res<Time>,
grav: Res<Gravity>,
mut q: Query<&mut Vel>,
) {
// Since the lib itself doesnt take care of gravity(for obv reasons) we need to do it here
let g = grav.0;
let t = time.delta_seconds();
for mut v in q.iter_mut() {
v.0 += t * g;
}
}
fn controller_on_stuff(
mut query: Query<(Entity, &mut Player)>,
mut colls: EventReader<CollisionEvent>,
) {
// Iterate over the collisions and check if the player is on a wall/floor
let (e, mut c) = query.single_mut();
// clear the current data on c
c.on_floor = false;
c.on_wall = None;
for coll in colls.iter().filter(|&c| c.is_b_static) {
if coll.entity_a == e {
let n = coll.normal.dot(Vec2::Y);
if n > 0.7 {
c.on_floor = true;
}
else if n.abs() <= 0.7 {
c.on_wall = Some(coll.normal);
}
}
}
}
fn character_system(
input: Res<Input<KeyCode>>,
time: Res<Time>,
gravity: Res<Gravity>,
mut query: Query<(&mut Player, &mut Vel)>,
) {
let gravity = gravity.0;
for (mut controller, mut vel) in query.iter_mut() {
if let Some(normal) = controller.on_wall {
// If we are colliding with a wall, make sure to stick
vel.0 -= normal * 0.1;
// and limit our speed downwards
if vel.0.y < -1.0 {
vel.0.y = -1.0;
}
}
// There are 2 places in which we apply a jump, so i made a little colsure for code reusability
let jump = |body: &Player, vel: &mut Vel| {
vel.0 = vel.0.slide(gravity.normalize()) - gravity * 0.6;
let wall = body.on_wall.unwrap_or(Vec2::ZERO) * 250.0;
vel.0 += wall;
};
let should_jump = input.just_pressed(KeyCode::Space) || input.just_pressed(KeyCode::W);
if controller.on_floor || controller.on_wall.is_some() {
controller.double_jump = true;
if should_jump {
jump(&controller, &mut vel);
}
}
else if controller.double_jump && should_jump {
controller.double_jump = false;
jump(&controller, &mut vel);
}
// This is for the testing purpose of the continuous collision - aka "The Stomp"
if input.just_pressed(KeyCode::S) && !controller.on_floor {
vel.0 = Vec2::new(0.0, -5000.0);
}
// REMINDER: Dont forget to multiply by `time.delta_seconds()` when messing with movement
let acc = Vec2::new(1000.0, 0.0) * time.delta_seconds();
if input.pressed(KeyCode::A) {
vel.0 -= acc;
}
else if input.pressed(KeyCode::D) {
vel.0 += acc;
}
else {
// This is not a good way to do friction
vel.0.x *= 1.0 - (10.0 * time.delta_seconds());
}
// terminal velocity
const TERMINAL_X: f32 = 500.0;
if vel.0.x.abs() > TERMINAL_X {
vel.0.x = TERMINAL_X.copysign(vel.0.x); // you can also do `TERMINAL_X * vel.0.x.signum()`
}
}
}
fn change_sensor_color(
mut q: Query<(&Sensor, &mut Sprite)>,
) {
// Simply change the color of the sensor if something is inside it
for (s, mut h) in q.iter_mut() {
h.color = if s.bodies.len() == 0 {
Color::GOLD
}
else {
Color::rgba(0.0, 0.5, 1.0, 0.5)
}
}
}
fn ray_head(
mut ts: Query<&mut Transform, Without<RayCast>>,
q: Query<(&RayCast, &Children, &Transform)>,
) | {
for (r,c, rt) in q.iter() {
if let Some(c) = c.first() {
if let Ok(mut t) = ts.get_mut(*c) {
// We use the offset in the `unwrap_or` because we want to offset the position to be where the ray "ends"
// while in the `map`(and `pos` by extension) we want the position relative to the transform component
// since `a.collision_point` is in global space
let pos = Vec2::new(rt.translation.x, rt.translation.y);
t.translation = r.collision.map(|a| a.collision_point - pos).unwrap_or(r.cast + r.offset).extend(0.0);
}
}
}
} |
|
classes_a.js | var searchData= | ['profiler',['Profiler',['../class_utility_1_1_profiler.html',1,'Utility']]],
['protocol',['Protocol',['../class_p_l_e_n2_1_1_protocol.html',1,'PLEN2']]]
]; | [ |
read.go | package pool
import (
"time"
"github.com/decred/dcrlnd/buffer"
)
// Read is a worker pool specifically designed for sharing access to buffer.Read
// objects amongst a set of worker goroutines. This enables an application to
// limit the total number of buffer.Read objects allocated at any given time.
type Read struct {
workerPool *Worker
bufferPool *ReadBuffer
}
// NewRead creates a new Read pool, using an underlying ReadBuffer pool to
// recycle buffer.Read objects across the lifetime of the Read pool's workers.
func NewRead(readBufferPool *ReadBuffer, numWorkers int,
workerTimeout time.Duration) *Read |
// Start safely spins up the Read pool.
func (r *Read) Start() error {
return r.workerPool.Start()
}
// Stop safely shuts down the Read pool.
func (r *Read) Stop() error {
return r.workerPool.Stop()
}
// Submit accepts a function closure that provides access to the fresh
// buffer.Read object. The function's execution will be allocated to one of the
// underlying Worker pool's goroutines.
func (r *Read) Submit(inner func(*buffer.Read) error) error {
return r.workerPool.Submit(func(s WorkerState) error {
state := s.(*readWorkerState)
return inner(state.readBuf)
})
}
// readWorkerState is the per-goroutine state maintained by a Read pool's
// goroutines.
type readWorkerState struct {
// bufferPool is the pool to which the readBuf will be returned when the
// goroutine exits.
bufferPool *ReadBuffer
// readBuf is a buffer taken from the bufferPool on initialization,
// which will be cleaned and provided to any tasks that the goroutine
// processes before exiting.
readBuf *buffer.Read
}
// newWorkerState initializes a new readWorkerState, which will be called
// whenever a new goroutine is allocated to begin processing read tasks.
func (r *Read) newWorkerState() WorkerState {
return &readWorkerState{
bufferPool: r.bufferPool,
readBuf: r.bufferPool.Take(),
}
}
// Cleanup returns the readBuf to the underlying buffer pool, and removes the
// goroutine's reference to the readBuf.
func (r *readWorkerState) Cleanup() {
r.bufferPool.Return(r.readBuf)
r.readBuf = nil
}
// Reset recycles the readBuf to make it ready for any subsequent tasks the
// goroutine may process.
func (r *readWorkerState) Reset() {
r.readBuf.Recycle()
}
| {
r := &Read{
bufferPool: readBufferPool,
}
r.workerPool = NewWorker(&WorkerConfig{
NewWorkerState: r.newWorkerState,
NumWorkers: numWorkers,
WorkerTimeout: workerTimeout,
})
return r
} |
test.module.base.ts | /*
------------------------------------------------------------------------------
This code was generated by Amplication.
Changes to this file will be lost if the code is regenerated.
There are other ways to to customize your code, see this doc to learn more
https://docs.amplication.com/docs/how-to/custom-code
------------------------------------------------------------------------------
*/
import { Module, forwardRef } from "@nestjs/common";
import { MorganModule } from "nest-morgan";
import { PrismaModule } from "nestjs-prisma";
import { ACLModule } from "../../auth/acl.module";
import { AuthModule } from "../../auth/auth.module";
| @Module({
imports: [
ACLModule,
forwardRef(() => AuthModule),
MorganModule,
PrismaModule,
],
exports: [ACLModule, AuthModule, MorganModule, PrismaModule],
})
export class TestModuleBase {} | |
metrics_controller.go | package controllers
import (
"net"
"net/http"
"strconv"
"sync"
"time"
"github.com/cloudnativelabs/kube-router/app/options"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"golang.org/x/net/context"
"k8s.io/client-go/kubernetes"
)
var (
serviceTotalConn = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "service_total_connections",
Help: "Total incoming connections made",
}, []string{"namespace", "service_name", "service_vip", "protocol", "port"})
servicePacketsIn = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "service_packets_in",
Help: "Total incoming packets",
}, []string{"namespace", "service_name", "service_vip", "protocol", "port"})
servicePacketsOut = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "service_packets_out",
Help: "Total outgoing packets",
}, []string{"namespace", "service_name", "service_vip", "protocol", "port"})
serviceBytesIn = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "service_bytes_in",
Help: "Total incoming bytes",
}, []string{"namespace", "service_name", "service_vip", "protocol", "port"})
serviceBytesOut = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "service_bytes_out",
Help: "Total outgoing bytes",
}, []string{"namespace", "service_name", "service_vip", "protocol", "port"})
servicePpsIn = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "service_pps_in",
Help: "Incoming packets per second",
}, []string{"namespace", "service_name", "service_vip", "protocol", "port"})
servicePpsOut = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "service_pps_out",
Help: "Outgoing packets per second",
}, []string{"namespace", "service_name", "service_vip", "protocol", "port"})
serviceCPS = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "service_cps",
Help: "Service connections per second",
}, []string{"namespace", "service_name", "service_vip", "protocol", "port"})
serviceBpsIn = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "service_bps_in",
Help: "Incoming bytes per second",
}, []string{"namespace", "service_name", "service_vip", "protocol", "port"})
serviceBpsOut = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "service_bps_out",
Help: "Outgoing bytes per second",
}, []string{"namespace", "service_name", "service_vip", "protocol", "port"})
controllerIpvsServices = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "controller_ipvs_services",
Help: "Number of ipvs services in the instance",
}, []string{})
controllerIptablesSyncTime = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "controller_iptables_sync_time",
Help: "Time it took for controller to sync iptables",
}, []string{})
controllerPublishMetricsTime = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "controller_publish_metrics_time",
Help: "Time it took to publish metrics",
}, []string{})
controllerIpvsServicesSyncTime = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "controller_ipvs_services_sync_time",
Help: "Time it took for controller to sync ipvs services",
}, []string{})
controllerBPGpeers = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "controller_bgp_peers",
Help: "BGP peers in the runtime configuration",
}, []string{})
controllerBGPInternalPeersSyncTime = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "controller_bgp_internal_peers_sync_time",
Help: "Time it took to sync internal bgp peers",
}, []string{})
controllerBGPadvertisementsReceived = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "controller_bgp_advertisements_received",
Help: "Time it took to sync internal bgp peers",
}, []string{})
controllerIpvsMetricsExportTime = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "controller_ipvs_metrics_export_time",
Help: "Time it took to export metrics",
}, []string{})
)
// MetricsController Holds settings for the metrics controller
type MetricsController struct {
endpointsMap endpointsInfoMap
MetricsPath string
MetricsPort uint16
mu sync.Mutex
nodeIP net.IP
serviceMap serviceInfoMap
}
// Run prometheus metrics controller
func (mc *MetricsController) Run(healthChan chan<- *ControllerHeartbeat, stopCh <-chan struct{}, wg *sync.WaitGroup) error {
t := time.NewTicker(3 * time.Second)
defer wg.Done()
glog.Info("Starting metrics controller")
// register metrics for this controller
prometheus.MustRegister(controllerIpvsMetricsExportTime)
srv := &http.Server{Addr: ":" + strconv.Itoa(int(mc.MetricsPort)), Handler: http.DefaultServeMux}
// add prometheus handler on metrics path
http.Handle(mc.MetricsPath, promhttp.Handler())
go func() {
if err := srv.ListenAndServe(); err != nil {
// cannot panic, because this probably is an intentional close
glog.Errorf("Metrics controller error: %s", err)
}
}()
for {
sendHeartBeat(healthChan, "MC")
select {
case <-stopCh:
glog.Infof("Shutting down metrics controller")
if err := srv.Shutdown(context.Background()); err != nil {
glog.Errorf("could not shutdown: %v", err)
} | case <-t.C:
glog.V(4).Info("Metrics controller tick")
}
}
}
// NewMetricsController returns new MetricController object
func NewMetricsController(clientset *kubernetes.Clientset, config *options.KubeRouterConfig) (*MetricsController, error) {
mc := MetricsController{}
mc.MetricsPath = config.MetricsPath
mc.MetricsPort = config.MetricsPort
return &mc, nil
} | return nil |
App.py | from PyQt5 import QtWidgets, uic
from PyQt5.QtGui import QImage, QPixmap, QPalette, qRgb, qGray
import sys
import numpy as np
from typing import Callable
from numbers import Number
def process_image(
input_image: np.array,
kernel_size: int,
kernel_fn: Callable[[np.array], float]) -> np.array:
padding_width: int = kernel_size // 2
padding_height: int = kernel_size // 2
padding = ((padding_height, padding_height), (padding_width, padding_width))
input_image_padding: np.array = np.pad(
array=input_image,
pad_width=padding,
mode='edge')
result_image: np.array = np.zeros(input_image.shape, dtype='float')
image_height, image_width = result_image.shape
for image_x in range(image_width):
for image_y in range(image_height):
x_pos_begin = image_x
x_pos_end = image_x + kernel_size
y_pos_begin = image_y
y_pos_end = image_y + kernel_size
image_segment: np.array = input_image_padding[y_pos_begin:y_pos_end, x_pos_begin:x_pos_end] | return result_image
def mean_fn(
image_segment: np.array) -> float:
return float(np.mean(image_segment))
def std_fn(
image_segment: np.array) -> float:
return float(np.std(image_segment))
def convert_to_binary(
input_image: np.array,
threshold: int = 127) -> np.array:
max_val: int = 255
min_val: int = 0
initial_conv: np.array = np.where((input_image <= threshold), input_image, max_val)
final_conv: np.array = np.where((initial_conv > threshold), initial_conv, min_val)
return final_conv
def normalize_image(
input_image: np.array) -> np.array:
result_image: np.array = np.zeros(input_image.shape)
input_max = input_image.max()
input_min = input_image.min()
input_range = input_max - input_min
height, width = input_image.shape
for y in range(height):
for x in range(width):
input_value = input_image[y][x]
scaled_input_value = (input_value - input_min) / input_range if input_range != 0 else 0
result_image[y][x] = scaled_input_value * 255.0
return result_image
def fill_image(
input_image: np.array,
value: Number,
replace_value: Number):
height, width = input_image.shape
for y in range(height):
for x in range(width):
if input_image[y, x] == value:
input_image[y, x] = replace_value
def mark_objects(
input_image: np.array) -> np.array:
result_image: np.array = np.copy(input_image)
current_object_id = 1
height, width = input_image.shape
for y in range(height):
for x in range(width):
if y == 0:
c = 0
else:
c = result_image[y - 1, x]
if x == 0:
b = 0
else:
b = result_image[y, x - 1]
a = result_image[y, x]
if a == 0:
pass
elif b == 0 and c == 0:
current_object_id += 1
result_image[y, x] = current_object_id
elif b != 0 and c == 0:
result_image[y, x] = b
elif b == 0 and c != 0:
result_image[y, x] = c
elif b != 0 and c != 0:
if b == c:
result_image[y, x] = b
else:
result_image[y, x] = b
fill_image(
input_image=result_image,
value=c,
replace_value=b)
return result_image
def delete_objects(
input_image: np.array,
object_size: int):
unique_mask, hist = np.unique(input_image, return_counts=True)
for i in range(1, len(unique_mask)):
if hist[i] < object_size:
for (y, x), _ in np.ndenumerate(input_image):
if input_image[y, x] == unique_mask[i]:
input_image[y, x] = 0
class Ui(QtWidgets.QMainWindow):
def __init__(self):
super(Ui, self).__init__()
uic.loadUi('Main.ui', self)
self.action_open = self.findChild(QtWidgets.QAction, 'actionOpen')
self.action_open.triggered.connect(self.action_open_triggered)
self.action_exit = self.findChild(QtWidgets.QAction, 'actionExit')
self.action_exit.triggered.connect(self.action_exit_triggered)
self.bt_apply = self.findChild(QtWidgets.QPushButton, 'btApply')
self.bt_apply.clicked.connect(self.bt_apply_pressed)
self.input_image_canvas = QtWidgets.QLabel()
self.input_image_canvas.setBackgroundRole(QPalette.Base)
self.input_image_canvas.setSizePolicy(
QtWidgets.QSizePolicy.Ignored,
QtWidgets.QSizePolicy.Ignored)
self.input_image_canvas.setScaledContents(True)
self.sa_input_image = self.findChild(QtWidgets.QScrollArea, 'saInputImage')
self.sa_input_image.setWidget(self.input_image_canvas)
self.sa_input_image.setWidgetResizable(False)
self.processed_image_canvas = QtWidgets.QLabel()
self.processed_image_canvas.setBackgroundRole(QPalette.Base)
self.processed_image_canvas.setSizePolicy(
QtWidgets.QSizePolicy.Ignored,
QtWidgets.QSizePolicy.Ignored)
self.processed_image_canvas.setScaledContents(True)
self.sa_processed_image = self.findChild(QtWidgets.QScrollArea, 'saProcessedImage')
self.sa_processed_image.setWidget(self.processed_image_canvas)
self.sa_processed_image.setWidgetResizable(False)
self.mask_image_canvas = QtWidgets.QLabel()
self.mask_image_canvas.setBackgroundRole(QPalette.Base)
self.mask_image_canvas.setSizePolicy(
QtWidgets.QSizePolicy.Ignored,
QtWidgets.QSizePolicy.Ignored)
self.mask_image_canvas.setScaledContents(True)
self.sa_mask_image = self.findChild(QtWidgets.QScrollArea, 'saMask')
self.sa_mask_image.setWidget(self.mask_image_canvas)
self.sa_mask_image.setWidgetResizable(False)
self.segmented_image_canvas = QtWidgets.QLabel()
self.segmented_image_canvas.setBackgroundRole(QPalette.Base)
self.segmented_image_canvas.setSizePolicy(
QtWidgets.QSizePolicy.Ignored,
QtWidgets.QSizePolicy.Ignored)
self.segmented_image_canvas.setScaledContents(True)
self.sa_segmented_image = self.findChild(QtWidgets.QScrollArea, 'saSegmentedImage')
self.sa_segmented_image.setWidget(self.segmented_image_canvas)
self.sa_segmented_image.setWidgetResizable(False)
self.cb_method = self.findChild(QtWidgets.QComboBox, 'cbMethod')
self.cb_method.addItems(['Mean', 'Std'])
self.le_kernel_size = self.findChild(QtWidgets.QLineEdit, 'leKernelSize')
self.le_threshold = self.findChild(QtWidgets.QLineEdit, 'leThreshold')
self.le_delete_objects = self.findChild(QtWidgets.QLineEdit, 'leDeleteObjects')
self.show()
def action_open_triggered(self):
options = QtWidgets.QFileDialog.Options()
file_name, _ = QtWidgets.QFileDialog.\
getOpenFileName(self,
'QFileDialog.getOpenFileName()',
'',
'Images (*.png *.jpeg *.jpg *.bmp *.gif)',
options=options)
if file_name:
image = QImage(file_name).convertToFormat(QImage.Format_Grayscale8)
if image.isNull():
QtWidgets.QMessageBox.\
information(self,
"Texture segmentation",
"Cannot load %s." % file_name)
return
self.input_image_canvas.setPixmap(QPixmap.fromImage(image))
self.input_image_canvas.adjustSize()
def action_exit_triggered(self):
self.close()
def bt_apply_pressed(self):
method = self.cb_method.currentIndex()
kernel_size = int(self.le_kernel_size.text())
threshold = int(self.le_threshold.text())
object_size = int(self.le_delete_objects.text())
input_q_image = self.input_image_canvas.pixmap().toImage().convertToFormat(QImage.Format_Grayscale8)
input_image = np.zeros((input_q_image.height(), input_q_image.width()), dtype='float')
for (y, x), _ in np.ndenumerate(input_image):
input_image[y, x] = qGray(input_q_image.pixel(x, y))
if method == 0:
kernel_fn = mean_fn
elif method == 1:
kernel_fn = std_fn
else:
return
processed_image: np.array = process_image(
input_image=input_image,
kernel_size=kernel_size,
kernel_fn=kernel_fn)
normalized_image: np.array = normalize_image(input_image=processed_image)
binarized_image: np.array = convert_to_binary(input_image=normalized_image, threshold=threshold)
marked_image = mark_objects(input_image=binarized_image)
delete_objects(
input_image=marked_image,
object_size=object_size)
segmented_image = np.copy(input_image)
for (y, x), _ in np.ndenumerate(segmented_image):
if marked_image[y, x] == 0:
segmented_image[y, x] = 0
self.set_image(
input_image=normalized_image,
canvas=self.processed_image_canvas)
self.set_image(
input_image=normalize_image(
input_image=marked_image),
canvas=self.mask_image_canvas)
self.set_image(
input_image=segmented_image,
canvas=self.segmented_image_canvas)
@staticmethod
def set_image(input_image: np.array, canvas: QtWidgets.QLineEdit):
height, width = input_image.shape
q_image = QImage(width, height, QImage.Format_RGB32)
for y in range(height):
for x in range(width):
pixel = int(input_image[y, x])
q_image.setPixel(x, y, qRgb(pixel, pixel, pixel))
canvas.setPixmap(QPixmap.fromImage(q_image))
canvas.adjustSize()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
window = Ui()
app.exec_() | result_image[image_y][image_x] = kernel_fn(image_segment) |
modeling_gpt2.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""
import logging
import os
import warnings
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from transformers.modeling_gpt2 import PreTrainedModel, GPT2Config
from transformers.modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
GPT2DoubleHeadsModelOutput,
)
from transformers.activations import ACT2FN
from transformers.modeling_utils import (
Conv1D,
prune_conv1d_layer,
SequenceSummary,
find_pruneable_heads_and_indices,
)
logger = logging.getLogger(__name__)
_CONFIG_FOR_DOC = "GPT2Config"
_TOKENIZER_FOR_DOC = "GPT2Tokenizer"
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = [
"gpt2",
"gpt2-medium",
"gpt2-large",
"gpt2-xl",
"distilgpt2",
# See all GPT-2 models at https://huggingface.co/models?filter=gpt2
]
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(gpt2_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array.squeeze())
for name, array in zip(names, arrays):
name = name[6:] # skip "model/"
name = name.split("/")
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+\d+", m_name):
scope_names = re.split(r"(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "w" or scope_names[0] == "g":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "b":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "wpe" or scope_names[0] == "wte":
pointer = getattr(pointer, scope_names[0])
pointer = getattr(pointer, "weight")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False):
super().__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer(
"bias",
torch.tril(torch.ones((n_ctx, n_ctx), dtype=torch.uint8)).view(1, 1, n_ctx, n_ctx),
)
self.register_buffer("masked_bias", torch.tensor(-1e4))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, nx)
# TODO: check config.hidden_size
self.query = nn.Linear(n_state, nx)
self.key = nn.Linear(n_state, nx)
self.value = nn.Linear(n_state, nx)
self.c_proj = Conv1D(n_state, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
self.config = config
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.n_head, self.split_size // self.n_head, self.pruned_heads
)
index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
# Prune conv1d layers
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
# Update hyper params
self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
self.n_head = self.n_head - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):
w = torch.matmul(q, k)
if self.scale:
w = w / (float(v.size(-1)) ** 0.5)
nd, ns = w.size(-2), w.size(-1)
mask = self.bias[:, :, ns - nd : ns, :ns]
w = torch.where(mask.bool(), w, self.masked_bias.to(w.dtype))
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [torch.matmul(w, v)]
if output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)
else:
return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def forward(
self,
x,
layer_past=None,
attention_mask=None,
head_mask=None,
use_cache=False,
output_attentions=False,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
if self.config.is_decoder:
assert encoder_hidden_states is not None
key = self.key(encoder_hidden_states)
value = self.value(encoder_hidden_states)
query = self.query(x)
else:
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
if layer_past is not None:
past_key, past_value = (
layer_past[0].transpose(-2, -1),
layer_past[1],
) # transpose back cf below
key = torch.cat((past_key, key), dim=-1)
value = torch.cat((past_value, value), dim=-2)
if use_cache is True:
present = torch.stack(
(key.transpose(-2, -1), value)
) # transpose to have same shapes for stacking
else:
present = (None,)
if self.config.is_decoder:
attn_outputs = self._attn(
query, key, value, encoder_attention_mask, head_mask, output_attentions
)
else:
attn_outputs = self._attn(
query, key, value, attention_mask, head_mask, output_attentions
)
at = attn_outputs[0]
at = self.merge_heads(at)
at = self.c_proj(at)
at = self.resid_dropout(at)
outputs = [at, present] + attn_outputs[1:]
return outputs # a, present, (attentions)
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super().__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super().__init__()
nx = config.n_embd
inner_dim = config.n_inner if config.n_inner is not None else 4 * nx
self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
self.attn = Attention(nx, n_ctx, config, scale)
self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(inner_dim, config)
self.config = config
"""
TODO: add another self attention layer?
"""
def forward(
self,
x,
layer_past=None,
attention_mask=None,
head_mask=None,
use_cache=False,
output_attentions=False,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
output_attn = self.attn(
self.ln_1(x),
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
a = output_attn[0] # output_attn: a, present, (attentions)
x = x + a
m = self.mlp(self.ln_2(x))
x = x + m
outputs = [x] + output_attn[1:]
return outputs # x, present, (attentions)
class GPT2PreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = GPT2Config
load_tf_weights = load_tf_weights_in_gpt2
base_model_prefix = "transformer"
| super().__init__(*inputs, **kwargs)
def _init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
class GPT2Model(GPT2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.wte = nn.Embedding(config.vocab_size, config.n_embd)
self.wpe = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList(
[Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)]
)
self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.init_weights()
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, new_embeddings):
self.wte = new_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
use_cache=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
if "past" in kwargs:
warnings.warn(
"The `past` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = kwargs.pop("past")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
output_attentions = (
output_attentions if output_attentions is not None else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size = inputs_embeds.shape[0]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if position_ids is not None:
position_ids = position_ids.view(-1, input_shape[-1])
if past_key_values is None:
past_length = 0
past_key_values = [None] * len(self.h)
else:
past_length = past_key_values[0][0].size(-2)
if position_ids is None:
device = input_ids.device if input_ids is not None else inputs_embeds.device
position_ids = torch.arange(
past_length, input_shape[-1] + past_length, dtype=torch.long, device=device
)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
# Attention mask.
if attention_mask is not None:
assert batch_size > 0, "batch_size has to be defined and > 0"
attention_mask = attention_mask.view(batch_size, -1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# head_mask has shape n_layer x batch x n_heads x N x N
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape)
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
presents = () if use_cache else None
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
outputs = block(
hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask[i],
use_cache=use_cache,
output_attentions=output_attentions,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
hidden_states, present = outputs[:2]
if use_cache is True:
presents = presents + (present,)
if output_attentions:
all_attentions = all_attentions + (outputs[2],)
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(*output_shape)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, presents, all_hidden_states, all_attentions]
if v is not None
)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_attentions,
)
class GPT2LMHeadModel(GPT2PreTrainedModel):
authorized_missing_keys = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]
def __init__(self, config):
super().__init__(config)
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
def prepare_inputs_for_generation(self, input_ids, past, **kwargs):
# only last token for inputs_ids if past is defined in kwargs
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
return {"input_ids": input_ids, "past_key_values": past, "use_cache": kwargs["use_cache"]}
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
use_cache=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``
Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
"""
if "past" in kwargs:
warnings.warn(
"The `past` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = kwargs.pop("past")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=lm_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 1
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.multiple_choice_head = SequenceSummary(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
mc_token_ids=None,
labels=None,
mc_labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
mc_token_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_choices)`, `optional`, default to index of the last token of the input)
Index of the classification token in each input sequence.
Selected in the range ``[0, input_ids.size(-1) - 1[``.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`)
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``
Indices are selected in ``[-1, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
mc_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size)`, `optional`, defaults to :obj:`None`)
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Return:
Examples::
>>> import torch
>>> from transformers import GPT2Tokenizer, GPT2DoubleHeadsModel
>>> tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
>>> model = GPT2DoubleHeadsModel.from_pretrained('gpt2, return_dict=True)
>>> # Add a [CLS] to the vocabulary (we should train it also!)
>>> num_added_tokens = tokenizer.add_special_tokens({'cls_token': '[CLS]'})
>>> embedding_layer = model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
>>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
>>> encoded_choices = [tokenizer.encode(s) for s in choices]
>>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]
>>> input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2
>>> mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1
>>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
>>> lm_logits = outputs.lm_logits
>>> mc_logits = outputs.mc_logits
"""
if "lm_labels" in kwargs:
warnings.warn(
"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("lm_labels")
if "past" in kwargs:
warnings.warn(
"The `past` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = kwargs.pop("past")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
mc_loss = None
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
lm_loss = None
if labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits, mc_logits) + transformer_outputs[1:]
if mc_loss is not None:
output = (mc_loss,) + output
return ((lm_loss,) + output) if lm_loss is not None else output
return GPT2DoubleHeadsModelOutput(
lm_loss=lm_loss,
mc_loss=mc_loss,
lm_logits=lm_logits,
mc_logits=mc_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
) | def __init__(self, *inputs, **kwargs): |
msgping_test.go | // Copyright (c) 2013-2015 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire_test
import (
"bytes"
"io"
"reflect"
"testing"
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
)
// TestPing tests the MsgPing API against the latest protocol version.
func TestPing(t *testing.T) {
pver := wire.ProtocolVersion
// Ensure we get the same nonce back out.
nonce, err := wire.RandomUint64()
if err != nil {
t.Errorf("RandomUint64: Error generating nonce: %v", err)
}
msg := wire.NewMsgPing(nonce)
if msg.Nonce != nonce {
t.Errorf("NewMsgPing: wrong nonce - got %v, want %v",
msg.Nonce, nonce)
}
// Ensure the command is expected value.
wantCmd := "ping"
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgPing: wrong command - got %v want %v",
cmd, wantCmd)
}
// Ensure max payload is expected value for latest protocol version.
wantPayload := uint32(8)
maxPayload := msg.MaxPayloadLength(pver)
if maxPayload != wantPayload {
t.Errorf("MaxPayloadLength: wrong max payload length for "+
"protocol version %d - got %v, want %v", pver,
maxPayload, wantPayload)
}
return
}
// TestPingBIP0031 tests the MsgPing API against the protocol version
// BIP0031Version.
func TestPingBIP0031(t *testing.T) {
// Use the protocol version just prior to BIP0031Version changes.
pver := wire.BIP0031Version
nonce, err := wire.RandomUint64()
if err != nil {
t.Errorf("RandomUint64: Error generating nonce: %v", err)
}
msg := wire.NewMsgPing(nonce)
if msg.Nonce != nonce {
t.Errorf("NewMsgPing: wrong nonce - got %v, want %v",
msg.Nonce, nonce)
}
// Ensure max payload is expected value for old protocol version.
wantPayload := uint32(0)
maxPayload := msg.MaxPayloadLength(pver)
if maxPayload != wantPayload {
t.Errorf("MaxPayloadLength: wrong max payload length for "+
"protocol version %d - got %v, want %v", pver,
maxPayload, wantPayload)
}
// Test encode with old protocol version.
var buf bytes.Buffer
err = msg.BtcEncode(&buf, pver)
if err != nil {
t.Errorf("encode of MsgPing failed %v err <%v>", msg, err)
}
// Test decode with old protocol version.
readmsg := wire.NewMsgPing(0)
err = readmsg.BtcDecode(&buf, pver)
if err != nil {
t.Errorf("decode of MsgPing failed [%v] err <%v>", buf, err)
}
// Since this protocol version doesn't support the nonce, make sure
// it didn't get encoded and decoded back out.
if msg.Nonce == readmsg.Nonce {
t.Errorf("Should not get same nonce for protocol version %d", pver)
}
return
}
// TestPingCrossProtocol tests the MsgPing API when encoding with the latest
// protocol version and decoding with BIP0031Version.
func TestPingCrossProtocol(t *testing.T) {
nonce, err := wire.RandomUint64()
if err != nil {
t.Errorf("RandomUint64: Error generating nonce: %v", err)
}
msg := wire.NewMsgPing(nonce)
if msg.Nonce != nonce {
t.Errorf("NewMsgPing: wrong nonce - got %v, want %v",
msg.Nonce, nonce)
}
// Encode with latest protocol version.
var buf bytes.Buffer
err = msg.BtcEncode(&buf, wire.ProtocolVersion)
if err != nil {
t.Errorf("encode of MsgPing failed %v err <%v>", msg, err)
}
// Decode with old protocol version.
readmsg := wire.NewMsgPing(0)
err = readmsg.BtcDecode(&buf, wire.BIP0031Version)
if err != nil {
t.Errorf("decode of MsgPing failed [%v] err <%v>", buf, err)
}
// Since one of the protocol versions doesn't support the nonce, make
// sure it didn't get encoded and decoded back out.
if msg.Nonce == readmsg.Nonce {
t.Error("Should not get same nonce for cross protocol")
}
}
// TestPingWire tests the MsgPing wire encode and decode for various protocol
// versions.
func TestPingWire(t *testing.T) {
tests := []struct {
in wire.MsgPing // Message to encode
out wire.MsgPing // Expected decoded message
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
}{
// Latest protocol version.
{
wire.MsgPing{Nonce: 123123}, // 0x1e0f3
wire.MsgPing{Nonce: 123123}, // 0x1e0f3
[]byte{0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00},
wire.ProtocolVersion,
},
// Protocol version BIP0031Version+1
{
wire.MsgPing{Nonce: 456456}, // 0x6f708
wire.MsgPing{Nonce: 456456}, // 0x6f708
[]byte{0x08, 0xf7, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00},
wire.BIP0031Version + 1,
},
// Protocol version BIP0031Version
{
wire.MsgPing{Nonce: 789789}, // 0xc0d1d
wire.MsgPing{Nonce: 0}, // No nonce for pver
[]byte{}, // No nonce for pver
wire.BIP0031Version,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode the message to wire format.
var buf bytes.Buffer
err := test.in.BtcEncode(&buf, test.pver)
if err != nil {
t.Errorf("BtcEncode #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("BtcEncode #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue | }
// Decode the message from wire format.
var msg wire.MsgPing
rbuf := bytes.NewReader(test.buf)
err = msg.BtcDecode(rbuf, test.pver)
if err != nil {
t.Errorf("BtcDecode #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(msg, test.out) {
t.Errorf("BtcDecode #%d\n got: %s want: %s", i,
spew.Sdump(msg), spew.Sdump(test.out))
continue
}
}
}
// TestPingWireErrors performs negative tests against wire encode and decode
// of MsgPing to confirm error paths work correctly.
func TestPingWireErrors(t *testing.T) {
pver := wire.ProtocolVersion
tests := []struct {
in *wire.MsgPing // Value to encode
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
}{
// Latest protocol version with intentional read/write errors.
{
&wire.MsgPing{Nonce: 123123}, // 0x1e0f3
[]byte{0xf3, 0xe0, 0x01, 0x00},
pver,
2,
io.ErrShortWrite,
io.ErrUnexpectedEOF,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
w := newFixedWriter(test.max)
err := test.in.BtcEncode(w, test.pver)
if err != test.writeErr {
t.Errorf("BtcEncode #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from wire format.
var msg wire.MsgPing
r := newFixedReader(test.max, test.buf)
err = msg.BtcDecode(r, test.pver)
if err != test.readErr {
t.Errorf("BtcDecode #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
}
} | |
txn_heart_beat.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use crate::storage::kv::WriteData;
use crate::storage::lock_manager::LockManager;
use crate::storage::mvcc::{ErrorInner as MvccErrorInner, MvccTxn, Result as MvccResult};
use crate::storage::txn::commands::{
Command, CommandExt, ResponsePolicy, TypedCommand, WriteCommand, WriteContext, WriteResult,
};
use crate::storage::txn::Result;
use crate::storage::{ProcessResult, Snapshot, TxnStatus};
use txn_types::{Key, TimeStamp};
command! {
/// Heart beat of a transaction. It enlarges the primary lock's TTL.
///
/// This is invoked on a transaction's primary lock. The lock may be generated by either
/// [`AcquirePessimisticLock`](Command::AcquirePessimisticLock) or
/// [`Prewrite`](Command::Prewrite).
TxnHeartBeat:
cmd_ty => TxnStatus,
display => "kv::command::txn_heart_beat {} @ {} ttl {} | {:?}", (primary_key, start_ts, advise_ttl, ctx),
content => {
/// The primary key of the transaction.
primary_key: Key,
/// The transaction's start_ts.
start_ts: TimeStamp,
/// The new TTL that will be used to update the lock's TTL. If the lock's TTL is already
/// greater than `advise_ttl`, nothing will happen.
advise_ttl: u64,
}
}
impl CommandExt for TxnHeartBeat {
ctx!();
tag!(txn_heart_beat);
ts!(start_ts);
write_bytes!(primary_key);
gen_lock!(primary_key);
}
impl<S: Snapshot, L: LockManager> WriteCommand<S, L> for TxnHeartBeat {
fn process_write(self, snapshot: S, context: WriteContext<'_, L>) -> Result<WriteResult> {
// TxnHeartBeat never remove locks. No need to wake up waiters.
let mut txn = MvccTxn::new(
snapshot,
self.start_ts,
!self.ctx.get_not_fill_cache(),
context.concurrency_manager,
);
fail_point!("txn_heart_beat", |err| Err(
crate::storage::mvcc::Error::from(crate::storage::mvcc::txn::make_txn_error(
err,
&self.primary_key,
self.start_ts,
))
.into()
));
let lock: MvccResult<_> = if let Some(mut lock) = txn.reader.load_lock(&self.primary_key)? {
if lock.ts == self.start_ts {
if lock.ttl < self.advise_ttl {
lock.ttl = self.advise_ttl;
txn.put_lock(self.primary_key.clone(), &lock);
} else {
debug!(
"txn_heart_beat with advise_ttl not larger than current ttl";
"primary_key" => %self.primary_key,
"start_ts" => self.start_ts,
"advise_ttl" => self.advise_ttl,
"current_ttl" => lock.ttl,
);
}
Ok(lock)
} else {
debug!(
"txn_heart_beat invoked but lock is absent";
"primary_key" => %self.primary_key,
"start_ts" => self.start_ts,
"advise_ttl" => self.advise_ttl,
);
Err(MvccErrorInner::TxnLockNotFound {
start_ts: self.start_ts,
commit_ts: TimeStamp::zero(),
key: self.primary_key.clone().into_raw()?,
}
.into())
}
} else {
debug!(
"txn_heart_beat invoked but lock is absent";
"primary_key" => %self.primary_key,
"start_ts" => self.start_ts,
"advise_ttl" => self.advise_ttl,
);
Err(MvccErrorInner::TxnLockNotFound {
start_ts: self.start_ts,
commit_ts: TimeStamp::zero(),
key: self.primary_key.clone().into_raw()?,
}
.into())
};
context.statistics.add(&txn.take_statistics());
let pr = ProcessResult::TxnStatus {
txn_status: TxnStatus::uncommitted(lock?, false),
};
let write_data = WriteData::from_modifies(txn.into_modifies());
Ok(WriteResult {
ctx: self.ctx,
to_be_write: write_data,
rows: 1,
pr,
lock_info: None,
lock_guards: vec![],
response_policy: ResponsePolicy::OnApplied,
})
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::storage::kv::TestEngineBuilder;
use crate::storage::lock_manager::DummyLockManager;
use crate::storage::mvcc::tests::*;
use crate::storage::txn::commands::WriteCommand;
use crate::storage::txn::tests::*;
use crate::storage::Engine;
use concurrency_manager::ConcurrencyManager;
use kvproto::kvrpcpb::Context;
pub fn must_success<E: Engine>(
engine: &E,
primary_key: &[u8],
start_ts: impl Into<TimeStamp>,
advise_ttl: u64,
expect_ttl: u64,
) {
let ctx = Context::default();
let snapshot = engine.snapshot(&ctx).unwrap();
let start_ts = start_ts.into();
let cm = ConcurrencyManager::new(start_ts);
let command = crate::storage::txn::commands::TxnHeartBeat {
ctx: Context::default(),
primary_key: Key::from_raw(primary_key),
start_ts,
advise_ttl,
};
let result = command
.process_write(
snapshot,
WriteContext {
lock_mgr: &DummyLockManager,
concurrency_manager: cm,
extra_op: Default::default(),
statistics: &mut Default::default(),
pipelined_pessimistic_lock: false,
async_apply_prewrite: false,
},
)
.unwrap();
if let ProcessResult::TxnStatus {
txn_status: TxnStatus::Uncommitted { lock, .. },
} = result.pr
| else {
unreachable!();
}
}
pub fn must_err<E: Engine>(
engine: &E,
primary_key: &[u8],
start_ts: impl Into<TimeStamp>,
advise_ttl: u64,
) {
let ctx = Context::default();
let snapshot = engine.snapshot(&ctx).unwrap();
let start_ts = start_ts.into();
let cm = ConcurrencyManager::new(start_ts);
let command = crate::storage::txn::commands::TxnHeartBeat {
ctx,
primary_key: Key::from_raw(primary_key),
start_ts,
advise_ttl,
};
assert!(command
.process_write(
snapshot,
WriteContext {
lock_mgr: &DummyLockManager,
concurrency_manager: cm,
extra_op: Default::default(),
statistics: &mut Default::default(),
pipelined_pessimistic_lock: false,
async_apply_prewrite: false,
},
)
.is_err());
}
#[test]
fn test_txn_heart_beat() {
let engine = TestEngineBuilder::new().build().unwrap();
let (k, v) = (b"k1", b"v1");
let test = |ts| {
// Do nothing if advise_ttl is less smaller than current TTL.
must_success(&engine, k, ts, 90, 100);
// Return the new TTL if the TTL when the TTL is updated.
must_success(&engine, k, ts, 110, 110);
// The lock's TTL is updated and persisted into the db.
must_success(&engine, k, ts, 90, 110);
// Heart beat another transaction's lock will lead to an error.
must_err(&engine, k, ts - 1, 150);
must_err(&engine, k, ts + 1, 150);
// The existing lock is not changed.
must_success(&engine, k, ts, 90, 110);
};
// No lock.
must_err(&engine, k, 5, 100);
// Create a lock with TTL=100.
// The initial TTL will be set to 0 after calling must_prewrite_put. Update it first.
must_prewrite_put(&engine, k, v, k, 5);
must_locked(&engine, k, 5);
must_success(&engine, k, 5, 100, 100);
test(5);
must_locked(&engine, k, 5);
must_commit(&engine, k, 5, 10);
must_unlocked(&engine, k);
// No lock.
must_err(&engine, k, 5, 100);
must_err(&engine, k, 10, 100);
must_acquire_pessimistic_lock(&engine, k, k, 8, 15);
must_pessimistic_locked(&engine, k, 8, 15);
must_success(&engine, k, 8, 100, 100);
test(8);
must_pessimistic_locked(&engine, k, 8, 15);
}
}
| {
write(engine, &ctx, result.to_be_write.modifies);
assert_eq!(lock.ttl, expect_ttl);
} |
set_server_certificate.ts | /**
* Copyright 2015 CANAL+ Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0 | * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {
defer as observableDefer,
EMPTY,
Observable,
of as observableOf,
} from "rxjs";
import {
catchError,
ignoreElements,
} from "rxjs/operators";
import { ICustomMediaKeys } from "../../compat";
import {
EncryptedMediaError,
} from "../../errors";
import log from "../../log";
import castToObservable from "../../utils/cast_to_observable";
import {
IEMEWarningEvent,
TypedArray,
} from "./types";
/**
* Call the setServerCertificate API with the given certificate.
* Complete observable on success, throw when failed.
*
* TODO Handle returned value?
* From the spec:
* - setServerCertificate resolves with true if everything worked
* - it resolves with false if the CDM does not support server
* certificates.
*
* @param {MediaKeys} mediaKeys
* @param {ArrayBuffer} serverCertificate
* @returns {Observable}
*/
function setServerCertificate(
mediaKeys : ICustomMediaKeys|MediaKeys,
serverCertificate : ArrayBuffer|TypedArray
) : Observable<unknown> {
return observableDefer(() => {
return castToObservable(
(mediaKeys as MediaKeys).setServerCertificate(serverCertificate)
).pipe(catchError((error: unknown) => {
log.warn("EME: mediaKeys.setServerCertificate returned an error", error);
const reason = error instanceof Error ? error.toString() :
"`setServerCertificate` error";
throw new EncryptedMediaError("LICENSE_SERVER_CERTIFICATE_ERROR", reason);
}));
});
}
/**
* Call the setCertificate API. If it fails just emit the error as warning
* and complete.
* @param {MediaKeys} mediaKeys
* @param {ArrayBuffer} serverCertificate
* @returns {Observable}
*/
export default function trySettingServerCertificate(
mediaKeys : ICustomMediaKeys|MediaKeys,
serverCertificate : ArrayBuffer|TypedArray
) : Observable<IEMEWarningEvent> {
return observableDefer(() => {
if (typeof mediaKeys.setServerCertificate !== "function") {
log.warn("EME: Could not set the server certificate." +
" mediaKeys.setServerCertificate is not a function");
return EMPTY;
}
log.debug("EME: Setting server certificate on the MediaKeys");
return setServerCertificate(mediaKeys, serverCertificate).pipe(
ignoreElements(),
catchError(error => observableOf({ type: "warning" as const, value: error })));
});
}
export {
trySettingServerCertificate,
setServerCertificate,
}; | * |
utils_babi_mem2seq.py | # Modified by Microsoft Corporation.
# Licensed under the MIT license.
import logging
import torch
import torch.utils.data as data
from torch.autograd import Variable
from utils.config import *
from utils.until_temp import entityList
def hasNumbers(inputString):
return any(char.isdigit() for char in inputString)
MEM_TOKEN_SIZE = 3
class Lang:
def __init__(self):
self.word2index = {}
self.word2count = {}
self.index2word = {UNK_token: 'UNK', PAD_token: "PAD", EOS_token: "EOS", SOS_token: "SOS"}
self.n_words = 4 # Count default tokens
def index_words(self, story, trg=False):
if trg:
for word in story.split(' '):
self.index_word(word)
else:
for word_triple in story:
for word in word_triple:
self.index_word(word)
def index_word(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
class Dataset(data.Dataset):
"""Custom data.Dataset compatible with data.DataLoader."""
def __init__(self, src_seq, trg_seq, index_seq, gate_seq,src_word2id, trg_word2id,max_len, conv_seq,ent,ID,kb_arr):
"""Reads source and target sequences from txt files."""
self.src_seqs = src_seq
self.trg_seqs = trg_seq
self.index_seqs = index_seq
self.gate_seq = gate_seq
self.num_total_seqs = len(self.src_seqs)
self.src_word2id = src_word2id
self.trg_word2id = trg_word2id
self.max_len = max_len
self.conv_seq = conv_seq
self.ent = ent
self.ID = ID
self.kb_arr = kb_arr
def __getitem__(self, index):
"""Returns one data pair (source and target)."""
src_seq = self.src_seqs[index]
trg_seq = self.trg_seqs[index]
index_s = self.index_seqs[index]
gete_s = self.gate_seq[index]
src_seq = self.preprocess(src_seq, self.src_word2id, trg=False)
trg_seq = self.preprocess(trg_seq, self.trg_word2id)
index_s = self.preprocess_inde(index_s,src_seq)
gete_s = self.preprocess_gate(gete_s)
conv_seq = self.conv_seq[index]
conv_seq = self.preprocess(conv_seq, self.src_word2id, trg=False)
ID = self.ID[index]
kb_arr = self.kb_arr[index]
return src_seq, trg_seq, index_s, gete_s,self.max_len,self.src_seqs[index],self.trg_seqs[index], conv_seq,self.ent[index], ID, kb_arr
def __len__(self):
return self.num_total_seqs
def preprocess(self, sequence, word2id, trg=True):
"""Converts words to ids."""
if trg:
story = [word2id[word] if word in word2id else UNK_token for word in sequence.split(' ')]+ [EOS_token]
else:
story = []
for i, word_triple in enumerate(sequence):
story.append([])
for ii, word in enumerate(word_triple):
temp = word2id[word] if word in word2id else UNK_token
story[i].append(temp)
try:
story = torch.Tensor(story)
except:
print(sequence)
print(story)
return story
def preprocess_inde(self, sequence, src_seq):
"""Converts words to ids."""
sequence = sequence + [len(src_seq)-1]
sequence = torch.Tensor(sequence)
return sequence
def preprocess_gate(self, sequence):
"""Converts words to ids."""
sequence = sequence + [0]
sequence = torch.Tensor(sequence)
return sequence
def collate_fn(data):
def merge(sequences,max_len):
lengths = [len(seq) for seq in sequences]
if (max_len):
padded_seqs = torch.ones(len(sequences), max(lengths), MEM_TOKEN_SIZE).long()
for i, seq in enumerate(sequences):
end = lengths[i]
padded_seqs[i,:end,:] = seq[:end]
else:
padded_seqs = torch.ones(len(sequences), max(lengths)).long()
for i, seq in enumerate(sequences):
end = lengths[i]
padded_seqs[i, :end] = seq[:end]
return padded_seqs, lengths
# sort a list by sequence length (descending order) to use pack_padded_sequence
data.sort(key=lambda x: len(x[0]), reverse=True)
# seperate source and target sequences
src_seqs, trg_seqs, ind_seqs, gete_s, max_len, src_plain,trg_plain, conv_seq, ent, ID, kb_arr = zip(*data)
# merge sequences (from tuple of 1D tensor to 2D tensor)
src_seqs, src_lengths = merge(src_seqs,max_len)
trg_seqs, trg_lengths = merge(trg_seqs,None)
ind_seqs, _ = merge(ind_seqs,None)
gete_s, _ = merge(gete_s,None)
conv_seqs, conv_lengths = merge(conv_seq, max_len)
src_seqs = Variable(src_seqs).transpose(0,1)
trg_seqs = Variable(trg_seqs).transpose(0,1)
ind_seqs = Variable(ind_seqs).transpose(0,1)
gete_s = Variable(gete_s).transpose(0,1)
conv_seqs = Variable(conv_seqs).transpose(0,1)
if USE_CUDA:
src_seqs = src_seqs.cuda()
trg_seqs = trg_seqs.cuda()
ind_seqs = ind_seqs.cuda()
gete_s = gete_s.cuda()
conv_seqs = conv_seqs.cuda()
return src_seqs, src_lengths, trg_seqs, trg_lengths, ind_seqs, gete_s, src_plain, trg_plain, conv_seqs, conv_lengths, ent, ID, kb_arr
def read_langs(file_name, entity, max_line = None):
logging.info(("Reading lines from {}".format(file_name)))
data=[]
contex_arr = []
conversation_arr = []
kb_arr = []
u=None
r=None
user_counter = 0
system_counter = 0
system_res_counter = 0
KB_counter = 0
dialog_counter = 0
with open(file_name) as fin:
cnt_ptr = 0
cnt_voc = 0
max_r_len = 0
cnt_lin = 1
time_counter = 1
for line in fin:
line=line.strip()
if line:
nid, line = line.split(' ', 1)
if '\t' in line:
u, r = line.split('\t')
if u!='<SILENCE>': user_counter += 1
system_counter += 1
gen_u = generate_memory(u, "$u", str(time_counter))
contex_arr += gen_u
conversation_arr += gen_u
r_index = []
gate = []
for key in r.split(' '):
if ENTPTR:
if (key in entity):
index = [loc for loc, val in enumerate(contex_arr) if (val[0] == key)]
if (index):
index = max(index)
gate.append(1)
cnt_ptr +=1
else:
index = len(contex_arr)
cnt_voc +=1
else:
index = len(contex_arr)
gate.append(0)
cnt_voc +=1
else:
index = [loc for loc, val in enumerate(contex_arr) if (val[0] == key)]
if (index):
index = max(index)
gate.append(1)
cnt_ptr +=1
else:
index = len(contex_arr)
gate.append(0)
cnt_voc +=1
r_index.append(index)
system_res_counter += 1
if len(r_index) > max_r_len:
max_r_len = len(r_index)
contex_arr_temp = contex_arr + [['$$$$']*MEM_TOKEN_SIZE]
ent = []
for key in r.split(' '):
if(key in entity):
ent.append(key)
data.append([contex_arr_temp,r,r_index,gate,list(conversation_arr),ent,dialog_counter, kb_arr])
gen_r = generate_memory(r, "$s", str(time_counter))
contex_arr += gen_r
conversation_arr += gen_r
time_counter += 1
else:
KB_counter += 1
r=line
if USEKB:
temp = generate_memory(r, "", "")
contex_arr += temp
kb_arr += temp
else:
cnt_lin+=1
if(max_line and cnt_lin>=max_line):
break
contex_arr=[]
conversation_arr = []
kb_arr = []
time_counter = 1
dialog_counter += 1
max_len = max([len(d[0]) for d in data])
logging.info("Pointer percentace= {} ".format(cnt_ptr/(cnt_ptr+cnt_voc)))
logging.info("Max responce Len: {}".format(max_r_len))
logging.info("Max Input Len: {}".format(max_len))
logging.info("Avg. User Utterances: {}".format(user_counter*1.0/dialog_counter))
logging.info("Avg. Bot Utterances: {}".format(system_counter*1.0/dialog_counter))
logging.info("Avg. KB results: {}".format(KB_counter*1.0/dialog_counter))
logging.info("Avg. responce Len: {}".format(system_res_counter*1.0/system_counter))
print('Sample: ',data[1][0],data[1][1],data[1][2],data[1][3])
return data, max_len, max_r_len
def generate_memory(sent, speaker, time):
sent_new = []
sent_token = sent.split(' ')
if speaker=="$u" or speaker=="$s":
for word in sent_token:
temp = [word, speaker, 't'+str(time)] + ["PAD"]*(MEM_TOKEN_SIZE-3)
sent_new.append(temp)
else:
if sent_token[1]=="R_rating":
sent_token = sent_token + ["PAD"]*(MEM_TOKEN_SIZE-len(sent_token))
else:
sent_token = sent_token[::-1] + ["PAD"]*(MEM_TOKEN_SIZE-len(sent_token))
sent_new.append(sent_token)
return sent_new
def get_seq(pairs,lang,batch_size,type,max_len):
x_seq = []
y_seq = []
ptr_seq = []
gate_seq = []
conv_seq = []
ent = []
ID = []
kb_arr = []
for pair in pairs:
x_seq.append(pair[0])
y_seq.append(pair[1])
ptr_seq.append(pair[2])
gate_seq.append(pair[3])
conv_seq.append(pair[4])
ent.append(pair[5])
ID.append(pair[6])
kb_arr.append(pair[7])
if(type):
lang.index_words(pair[0])
lang.index_words(pair[1], trg=True)
dataset = Dataset(x_seq, y_seq,ptr_seq,gate_seq,lang.word2index, lang.word2index,max_len, conv_seq,ent,ID,kb_arr)
data_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=type,
collate_fn=collate_fn)
return data_loader
def prepare_data_seq(task,batch_size=100,shuffle=True):
file_train = 'data/dialog-bAbI-tasks/dialog-babi-task{}trn.txt'.format(task)
file_dev = 'data/dialog-bAbI-tasks/dialog-babi-task{}dev.txt'.format(task)
file_test = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst.txt'.format(task)
if (int(task) != 6):
file_test_OOV = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst-OOV.txt'.format(task)
if int(task)!=6:
ent = entityList('data/dialog-bAbI-tasks/dialog-babi-kb-all.txt',int(task))
else:
ent = entityList('data/dialog-bAbI-tasks/dialog-babi-task6-dstc2-kb.txt',int(task))
pair_train,max_len_train, max_r_train = read_langs(file_train, ent, max_line=None)
pair_dev,max_len_dev, max_r_dev = read_langs(file_dev, ent, max_line=None)
pair_test,max_len_test, max_r_test = read_langs(file_test, ent, max_line=None)
max_r_test_OOV = 0
max_len_test_OOV = 0
if (int(task) != 6):
pair_test_OOV,max_len_test_OOV, max_r_test_OOV = read_langs(file_test_OOV, ent, max_line=None)
max_len = max(max_len_train,max_len_dev,max_len_test,max_len_test_OOV) + 1
max_r = max(max_r_train,max_r_dev,max_r_test,max_r_test_OOV) +1
lang = Lang()
train = get_seq(pair_train,lang,batch_size,True,max_len)
dev = get_seq(pair_dev,lang,batch_size,False,max_len)
test = get_seq(pair_test,lang,batch_size,False,max_len) | testOOV = []
logging.info("Read %s sentence pairs train" % len(pair_train))
logging.info("Read %s sentence pairs dev" % len(pair_dev))
logging.info("Read %s sentence pairs test" % len(pair_test))
if (int(task) != 6):
logging.info("Read %s sentence pairs test" % len(pair_test_OOV))
logging.info("Max len Input %s " % max_len)
logging.info("Vocab_size %s " % lang.n_words)
logging.info("USE_CUDA={}".format(USE_CUDA))
return train, dev, test, testOOV, lang, max_len, max_r | if (int(task) != 6):
testOOV = get_seq(pair_test_OOV,lang,batch_size,False,max_len)
else: |
funcoes.py | def leiaInt(msg):
while True:
try:
n = int(input(msg))
except (ValueError, TypeError):
print('ERRO: Por favor digite um número inteiro válido.')
continue #Para jogar novamente pro while
except KeyboardInterrupt:
print('Usuário preferiu não digitar esse número.')
return 0
else:
return n
def linha(tam = 42):
return '-' * tam
def cabeçalho(txt):
print(linha())
print(txt.center(42))
print(linha())
def menu(lista):
cabeça | lho('MENU PRINCIPAL')
c = 1
for item in lista:
print(f'{c} - {item}')
c += 1
print(linha())
opc = leiaInt('Sua opção: ')
return opc
|
|
kvs.rs |
fn main() {
let matches = App::new(env!("CARGO_PKG_NAME"))
.version(env!("CARGO_PKG_VERSION"))
.author(env!("CARGO_PKG_AUTHORS"))
.about(env!("CARGO_PKG_DESCRIPTION"))
.setting(AppSettings::DisableHelpSubcommand)
.setting(AppSettings::SubcommandRequiredElseHelp)
.setting(AppSettings::VersionlessSubcommands)
.subcommand(
SubCommand::with_name("set")
.about("Set the value of a string key to a string")
.arg(Arg::with_name("KEY").help("A string key").required(true))
.arg(
Arg::with_name("VALUE")
.help("The string value of the key")
.required(true),
),
)
.subcommand(
SubCommand::with_name("get")
.about("Get the string value of a given string key")
.arg(Arg::with_name("KEY").help("A string key").required(true)),
)
.subcommand(
SubCommand::with_name("rm")
.about("Remove a given key")
.arg(Arg::with_name("KEY").help("A string key").required(true)),
)
.get_matches();
match matches.subcommand() {
("set", Some(_matches)) => {
eprintln!("unimplemented");
exit(1);
}
("get", Some(_matches)) => {
eprintln!("unimplemented");
exit(1);
}
("rm", Some(_matches)) => {
eprintln!("unimplemented");
exit(1);
}
_ => unreachable!(),
}
} | use clap::{App, AppSettings, Arg, SubCommand};
use std::process::exit; |
|
resolve_lifetime.rs | //! Name resolution for lifetimes.
//!
//! Name resolution for lifetimes follows *much* simpler rules than the
//! full resolve. For example, lifetime names are never exported or
//! used between functions, and they operate in a purely top-down
//! way. Therefore, we break lifetime name resolution into a separate pass.
use crate::hir::def::{DefKind, Res};
use crate::hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE};
use crate::hir::map::Map;
use crate::hir::ptr::P;
use crate::hir::{GenericArg, GenericParam, ItemLocalId, LifetimeName, Node, ParamName, QPath};
use crate::ty::{self, DefIdTree, GenericParamDefKind, TyCtxt};
use crate::rustc::lint;
use crate::session::Session;
use crate::util::nodemap::{DefIdMap, FxHashMap, FxHashSet, HirIdMap, HirIdSet};
use errors::{pluralize, Applicability, DiagnosticBuilder};
use rustc_macros::HashStable;
use std::borrow::Cow;
use std::cell::Cell;
use std::mem::{replace, take};
use syntax::ast;
use syntax::attr;
use syntax::symbol::{kw, sym};
use syntax_pos::Span;
use crate::hir::intravisit::{self, NestedVisitorMap, Visitor};
use crate::hir::{self, GenericParamKind, LifetimeParamKind};
use rustc_error_codes::*;
/// The origin of a named lifetime definition.
///
/// This is used to prevent the usage of in-band lifetimes in `Fn`/`fn` syntax.
#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug, HashStable)]
pub enum LifetimeDefOrigin {
// Explicit binders like `fn foo<'a>(x: &'a u8)` or elided like `impl Foo<&u32>`
ExplicitOrElided,
// In-band declarations like `fn foo(x: &'a u8)`
InBand,
// Some kind of erroneous origin
Error,
}
impl LifetimeDefOrigin {
fn from_param(param: &GenericParam) -> Self {
match param.kind {
GenericParamKind::Lifetime { kind } => match kind {
LifetimeParamKind::InBand => LifetimeDefOrigin::InBand,
LifetimeParamKind::Explicit => LifetimeDefOrigin::ExplicitOrElided,
LifetimeParamKind::Elided => LifetimeDefOrigin::ExplicitOrElided,
LifetimeParamKind::Error => LifetimeDefOrigin::Error,
},
_ => bug!("expected a lifetime param"),
}
}
}
// This counts the no of times a lifetime is used
#[derive(Clone, Copy, Debug)]
pub enum LifetimeUseSet<'tcx> {
One(&'tcx hir::Lifetime),
Many,
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug, HashStable)]
pub enum Region {
Static,
EarlyBound(/* index */ u32, /* lifetime decl */ DefId, LifetimeDefOrigin),
LateBound(ty::DebruijnIndex, /* lifetime decl */ DefId, LifetimeDefOrigin),
LateBoundAnon(ty::DebruijnIndex, /* anon index */ u32),
Free(DefId, /* lifetime decl */ DefId),
}
impl Region {
fn early(hir_map: &Map<'_>, index: &mut u32, param: &GenericParam) -> (ParamName, Region) {
let i = *index;
*index += 1;
let def_id = hir_map.local_def_id(param.hir_id);
let origin = LifetimeDefOrigin::from_param(param);
debug!("Region::early: index={} def_id={:?}", i, def_id);
(param.name.modern(), Region::EarlyBound(i, def_id, origin))
}
fn late(hir_map: &Map<'_>, param: &GenericParam) -> (ParamName, Region) {
let depth = ty::INNERMOST;
let def_id = hir_map.local_def_id(param.hir_id);
let origin = LifetimeDefOrigin::from_param(param);
debug!(
"Region::late: param={:?} depth={:?} def_id={:?} origin={:?}",
param, depth, def_id, origin,
);
(param.name.modern(), Region::LateBound(depth, def_id, origin))
}
fn late_anon(index: &Cell<u32>) -> Region {
let i = index.get();
index.set(i + 1);
let depth = ty::INNERMOST;
Region::LateBoundAnon(depth, i)
}
fn id(&self) -> Option<DefId> {
match *self {
Region::Static | Region::LateBoundAnon(..) => None,
Region::EarlyBound(_, id, _) | Region::LateBound(_, id, _) | Region::Free(_, id) => {
Some(id)
}
}
}
fn shifted(self, amount: u32) -> Region {
match self {
Region::LateBound(debruijn, id, origin) => {
Region::LateBound(debruijn.shifted_in(amount), id, origin)
}
Region::LateBoundAnon(debruijn, index) => {
Region::LateBoundAnon(debruijn.shifted_in(amount), index)
}
_ => self,
}
}
fn shifted_out_to_binder(self, binder: ty::DebruijnIndex) -> Region {
match self {
Region::LateBound(debruijn, id, origin) => {
Region::LateBound(debruijn.shifted_out_to_binder(binder), id, origin)
}
Region::LateBoundAnon(debruijn, index) => {
Region::LateBoundAnon(debruijn.shifted_out_to_binder(binder), index)
}
_ => self,
}
}
fn subst<'a, L>(self, mut params: L, map: &NamedRegionMap) -> Option<Region>
where
L: Iterator<Item = &'a hir::Lifetime>,
{
if let Region::EarlyBound(index, _, _) = self {
params.nth(index as usize).and_then(|lifetime| map.defs.get(&lifetime.hir_id).cloned())
} else {
Some(self)
}
}
}
/// A set containing, at most, one known element.
/// If two distinct values are inserted into a set, then it
/// becomes `Many`, which can be used to detect ambiguities.
#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Debug, HashStable)]
pub enum Set1<T> {
Empty,
One(T),
Many,
}
impl<T: PartialEq> Set1<T> {
pub fn insert(&mut self, value: T) {
*self = match self {
Set1::Empty => Set1::One(value),
Set1::One(old) if *old == value => return,
_ => Set1::Many,
};
}
}
pub type ObjectLifetimeDefault = Set1<Region>;
/// Maps the id of each lifetime reference to the lifetime decl
/// that it corresponds to.
///
/// FIXME. This struct gets converted to a `ResolveLifetimes` for
/// actual use. It has the same data, but indexed by `DefIndex`. This
/// is silly.
#[derive(Default)]
struct NamedRegionMap {
// maps from every use of a named (not anonymous) lifetime to a
// `Region` describing how that region is bound
pub defs: HirIdMap<Region>,
// the set of lifetime def ids that are late-bound; a region can
// be late-bound if (a) it does NOT appear in a where-clause and
// (b) it DOES appear in the arguments.
pub late_bound: HirIdSet,
// For each type and trait definition, maps type parameters
// to the trait object lifetime defaults computed from them.
pub object_lifetime_defaults: HirIdMap<Vec<ObjectLifetimeDefault>>,
}
/// See [`NamedRegionMap`].
#[derive(Default, HashStable)]
pub struct ResolveLifetimes {
defs: FxHashMap<LocalDefId, FxHashMap<ItemLocalId, Region>>,
late_bound: FxHashMap<LocalDefId, FxHashSet<ItemLocalId>>,
object_lifetime_defaults:
FxHashMap<LocalDefId, FxHashMap<ItemLocalId, Vec<ObjectLifetimeDefault>>>,
}
struct LifetimeContext<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
map: &'a mut NamedRegionMap,
scope: ScopeRef<'a>,
/// This is slightly complicated. Our representation for poly-trait-refs contains a single
/// binder and thus we only allow a single level of quantification. However,
/// the syntax of Rust permits quantification in two places, e.g., `T: for <'a> Foo<'a>`
/// and `for <'a, 'b> &'b T: Foo<'a>`. In order to get the De Bruijn indices
/// correct when representing these constraints, we should only introduce one
/// scope. However, we want to support both locations for the quantifier and
/// during lifetime resolution we want precise information (so we can't
/// desugar in an earlier phase).
///
/// So, if we encounter a quantifier at the outer scope, we set
/// `trait_ref_hack` to `true` (and introduce a scope), and then if we encounter
/// a quantifier at the inner scope, we error. If `trait_ref_hack` is `false`,
/// then we introduce the scope at the inner quantifier.
trait_ref_hack: bool,
/// Used to disallow the use of in-band lifetimes in `fn` or `Fn` syntax.
is_in_fn_syntax: bool,
/// List of labels in the function/method currently under analysis.
labels_in_fn: Vec<ast::Ident>,
/// Cache for cross-crate per-definition object lifetime defaults.
xcrate_object_lifetime_defaults: DefIdMap<Vec<ObjectLifetimeDefault>>,
lifetime_uses: &'a mut DefIdMap<LifetimeUseSet<'tcx>>,
}
#[derive(Debug)]
enum Scope<'a> {
/// Declares lifetimes, and each can be early-bound or late-bound.
/// The `DebruijnIndex` of late-bound lifetimes starts at `1` and
/// it should be shifted by the number of `Binder`s in between the
/// declaration `Binder` and the location it's referenced from.
Binder {
lifetimes: FxHashMap<hir::ParamName, Region>,
/// if we extend this scope with another scope, what is the next index
/// we should use for an early-bound region?
next_early_index: u32,
/// Flag is set to true if, in this binder, `'_` would be
/// equivalent to a "single-use region". This is true on
/// impls, but not other kinds of items.
track_lifetime_uses: bool,
/// Whether or not this binder would serve as the parent
/// binder for opaque types introduced within. For example:
///
/// fn foo<'a>() -> impl for<'b> Trait<Item = impl Trait2<'a>>
///
/// Here, the opaque types we create for the `impl Trait`
/// and `impl Trait2` references will both have the `foo` item
/// as their parent. When we get to `impl Trait2`, we find
/// that it is nested within the `for<>` binder -- this flag
/// allows us to skip that when looking for the parent binder
/// of the resulting opaque type.
opaque_type_parent: bool,
s: ScopeRef<'a>,
},
/// Lifetimes introduced by a fn are scoped to the call-site for that fn,
/// if this is a fn body, otherwise the original definitions are used.
/// Unspecified lifetimes are inferred, unless an elision scope is nested,
/// e.g., `(&T, fn(&T) -> &T);` becomes `(&'_ T, for<'a> fn(&'a T) -> &'a T)`.
Body {
id: hir::BodyId,
s: ScopeRef<'a>,
},
/// A scope which either determines unspecified lifetimes or errors
/// on them (e.g., due to ambiguity). For more details, see `Elide`.
Elision {
elide: Elide,
s: ScopeRef<'a>,
},
/// Use a specific lifetime (if `Some`) or leave it unset (to be
/// inferred in a function body or potentially error outside one),
/// for the default choice of lifetime in a trait object type.
ObjectLifetimeDefault {
lifetime: Option<Region>,
s: ScopeRef<'a>,
},
Root,
}
#[derive(Clone, Debug)]
enum Elide {
/// Use a fresh anonymous late-bound lifetime each time, by
/// incrementing the counter to generate sequential indices.
FreshLateAnon(Cell<u32>),
/// Always use this one lifetime.
Exact(Region),
/// Less or more than one lifetime were found, error on unspecified.
Error(Vec<ElisionFailureInfo>),
}
#[derive(Clone, Debug)]
struct ElisionFailureInfo {
/// Where we can find the argument pattern.
parent: Option<hir::BodyId>,
/// The index of the argument in the original definition.
index: usize,
lifetime_count: usize,
have_bound_regions: bool,
}
type ScopeRef<'a> = &'a Scope<'a>;
const ROOT_SCOPE: ScopeRef<'static> = &Scope::Root;
pub fn provide(providers: &mut ty::query::Providers<'_>) {
*providers = ty::query::Providers {
resolve_lifetimes,
named_region_map: |tcx, id| {
let id = LocalDefId::from_def_id(DefId::local(id)); // (*)
tcx.resolve_lifetimes(LOCAL_CRATE).defs.get(&id)
},
is_late_bound_map: |tcx, id| {
let id = LocalDefId::from_def_id(DefId::local(id)); // (*)
tcx.resolve_lifetimes(LOCAL_CRATE).late_bound.get(&id)
},
object_lifetime_defaults_map: |tcx, id| {
let id = LocalDefId::from_def_id(DefId::local(id)); // (*)
tcx.resolve_lifetimes(LOCAL_CRATE).object_lifetime_defaults.get(&id)
},
..*providers
};
// (*) FIXME the query should be defined to take a LocalDefId
}
/// Computes the `ResolveLifetimes` map that contains data for the
/// entire crate. You should not read the result of this query
/// directly, but rather use `named_region_map`, `is_late_bound_map`,
/// etc.
fn resolve_lifetimes(tcx: TyCtxt<'_>, for_krate: CrateNum) -> &ResolveLifetimes {
assert_eq!(for_krate, LOCAL_CRATE);
let named_region_map = krate(tcx);
let mut rl = ResolveLifetimes::default();
for (hir_id, v) in named_region_map.defs {
let map = rl.defs.entry(hir_id.owner_local_def_id()).or_default();
map.insert(hir_id.local_id, v);
}
for hir_id in named_region_map.late_bound {
let map = rl.late_bound.entry(hir_id.owner_local_def_id()).or_default();
map.insert(hir_id.local_id);
}
for (hir_id, v) in named_region_map.object_lifetime_defaults {
let map = rl.object_lifetime_defaults.entry(hir_id.owner_local_def_id()).or_default();
map.insert(hir_id.local_id, v);
}
tcx.arena.alloc(rl)
}
fn krate(tcx: TyCtxt<'_>) -> NamedRegionMap {
let krate = tcx.hir().krate();
let mut map = NamedRegionMap {
defs: Default::default(),
late_bound: Default::default(),
object_lifetime_defaults: compute_object_lifetime_defaults(tcx),
};
{
let mut visitor = LifetimeContext {
tcx,
map: &mut map,
scope: ROOT_SCOPE,
trait_ref_hack: false,
is_in_fn_syntax: false,
labels_in_fn: vec![],
xcrate_object_lifetime_defaults: Default::default(),
lifetime_uses: &mut Default::default(),
};
for (_, item) in &krate.items {
visitor.visit_item(item);
}
}
map
}
/// In traits, there is an implicit `Self` type parameter which comes before the generics.
/// We have to account for this when computing the index of the other generic parameters.
/// This function returns whether there is such an implicit parameter defined on the given item.
fn sub_items_have_self_param(node: &hir::ItemKind<'_>) -> bool {
match *node {
hir::ItemKind::Trait(..) | hir::ItemKind::TraitAlias(..) => true,
_ => false,
}
}
impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::All(&self.tcx.hir())
}
// We want to nest trait/impl items in their parent, but nothing else.
fn visit_nested_item(&mut self, _: hir::ItemId) {}
fn visit_nested_body(&mut self, body: hir::BodyId) {
// Each body has their own set of labels, save labels.
let saved = take(&mut self.labels_in_fn);
let body = self.tcx.hir().body(body);
extract_labels(self, body);
self.with(Scope::Body { id: body.id(), s: self.scope }, |_, this| {
this.visit_body(body);
});
replace(&mut self.labels_in_fn, saved);
}
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
match item.kind {
hir::ItemKind::Fn(ref sig, ref generics, _) => {
self.visit_early_late(None, &sig.decl, generics, |this| {
intravisit::walk_item(this, item);
});
}
hir::ItemKind::ExternCrate(_)
| hir::ItemKind::Use(..)
| hir::ItemKind::Mod(..)
| hir::ItemKind::ForeignMod(..)
| hir::ItemKind::GlobalAsm(..) => {
// These sorts of items have no lifetime parameters at all.
intravisit::walk_item(self, item);
}
hir::ItemKind::Static(..) | hir::ItemKind::Const(..) => {
// No lifetime parameters, but implied 'static.
let scope = Scope::Elision { elide: Elide::Exact(Region::Static), s: ROOT_SCOPE };
self.with(scope, |_, this| intravisit::walk_item(this, item));
}
hir::ItemKind::OpaqueTy(hir::OpaqueTy { impl_trait_fn: Some(_), .. }) => {
// Currently opaque type declarations are just generated from `impl Trait`
// items. Doing anything on this node is irrelevant, as we currently don't need
// it.
}
hir::ItemKind::TyAlias(_, ref generics)
| hir::ItemKind::OpaqueTy(hir::OpaqueTy {
impl_trait_fn: None, ref generics, ..
})
| hir::ItemKind::Enum(_, ref generics)
| hir::ItemKind::Struct(_, ref generics)
| hir::ItemKind::Union(_, ref generics)
| hir::ItemKind::Trait(_, _, ref generics, ..)
| hir::ItemKind::TraitAlias(ref generics, ..)
| hir::ItemKind::Impl(_, _, _, ref generics, ..) => {
// Impls permit `'_` to be used and it is equivalent to "some fresh lifetime name".
// This is not true for other kinds of items.x
let track_lifetime_uses = match item.kind {
hir::ItemKind::Impl(..) => true,
_ => false,
};
// These kinds of items have only early-bound lifetime parameters.
let mut index = if sub_items_have_self_param(&item.kind) {
1 // Self comes before lifetimes
} else {
0
};
let mut non_lifetime_count = 0;
let lifetimes = generics
.params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
Some(Region::early(&self.tcx.hir(), &mut index, param))
}
GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
non_lifetime_count += 1;
None
}
})
.collect();
let scope = Scope::Binder {
lifetimes,
next_early_index: index + non_lifetime_count,
opaque_type_parent: true,
track_lifetime_uses,
s: ROOT_SCOPE,
};
self.with(scope, |old_scope, this| {
this.check_lifetime_params(old_scope, &generics.params);
intravisit::walk_item(this, item);
});
}
}
}
fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem<'tcx>) {
match item.kind {
hir::ForeignItemKind::Fn(ref decl, _, ref generics) => {
self.visit_early_late(None, decl, generics, |this| {
intravisit::walk_foreign_item(this, item);
})
}
hir::ForeignItemKind::Static(..) => {
intravisit::walk_foreign_item(self, item);
}
hir::ForeignItemKind::Type => {
intravisit::walk_foreign_item(self, item);
}
}
}
fn visit_ty(&mut self, ty: &'tcx hir::Ty) {
debug!("visit_ty: id={:?} ty={:?}", ty.hir_id, ty);
debug!("visit_ty: ty.kind={:?}", ty.kind);
match ty.kind {
hir::TyKind::BareFn(ref c) => {
let next_early_index = self.next_early_index();
let was_in_fn_syntax = self.is_in_fn_syntax;
self.is_in_fn_syntax = true;
let scope = Scope::Binder {
lifetimes: c
.generic_params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
Some(Region::late(&self.tcx.hir(), param))
}
_ => None,
})
.collect(),
s: self.scope,
next_early_index,
track_lifetime_uses: true,
opaque_type_parent: false,
};
self.with(scope, |old_scope, this| {
// a bare fn has no bounds, so everything
// contained within is scoped within its binder.
this.check_lifetime_params(old_scope, &c.generic_params);
intravisit::walk_ty(this, ty);
});
self.is_in_fn_syntax = was_in_fn_syntax;
}
hir::TyKind::TraitObject(ref bounds, ref lifetime) => {
debug!("visit_ty: TraitObject(bounds={:?}, lifetime={:?})", bounds, lifetime);
for bound in bounds {
self.visit_poly_trait_ref(bound, hir::TraitBoundModifier::None);
}
match lifetime.name {
LifetimeName::Implicit => {
// For types like `dyn Foo`, we should
// generate a special form of elided.
span_bug!(ty.span, "object-lifetime-default expected, not implict",);
}
LifetimeName::ImplicitObjectLifetimeDefault => {
// If the user does not write *anything*, we
// use the object lifetime defaulting
// rules. So e.g., `Box<dyn Debug>` becomes
// `Box<dyn Debug + 'static>`.
self.resolve_object_lifetime_default(lifetime)
}
LifetimeName::Underscore => {
// If the user writes `'_`, we use the *ordinary* elision
// rules. So the `'_` in e.g., `Box<dyn Debug + '_>` will be
// resolved the same as the `'_` in `&'_ Foo`.
//
// cc #48468
self.resolve_elided_lifetimes(vec![lifetime])
}
LifetimeName::Param(_) | LifetimeName::Static => {
// If the user wrote an explicit name, use that.
self.visit_lifetime(lifetime);
}
LifetimeName::Error => {}
}
}
hir::TyKind::Rptr(ref lifetime_ref, ref mt) => {
self.visit_lifetime(lifetime_ref);
let scope = Scope::ObjectLifetimeDefault {
lifetime: self.map.defs.get(&lifetime_ref.hir_id).cloned(),
s: self.scope,
};
self.with(scope, |_, this| this.visit_ty(&mt.ty));
}
hir::TyKind::Def(item_id, ref lifetimes) => {
// Resolve the lifetimes in the bounds to the lifetime defs in the generics.
// `fn foo<'a>() -> impl MyTrait<'a> { ... }` desugars to
// `type MyAnonTy<'b> = impl MyTrait<'b>;`
// ^ ^ this gets resolved in the scope of
// the opaque_ty generics
let (generics, bounds) = match self.tcx.hir().expect_item(item_id.id).kind {
// Named opaque `impl Trait` types are reached via `TyKind::Path`.
// This arm is for `impl Trait` in the types of statics, constants and locals.
hir::ItemKind::OpaqueTy(hir::OpaqueTy { impl_trait_fn: None, .. }) => {
intravisit::walk_ty(self, ty);
return;
}
// RPIT (return position impl trait)
hir::ItemKind::OpaqueTy(hir::OpaqueTy { ref generics, ref bounds, .. }) => {
(generics, bounds)
}
ref i => bug!("`impl Trait` pointed to non-opaque type?? {:#?}", i),
};
// Resolve the lifetimes that are applied to the opaque type.
// These are resolved in the current scope.
// `fn foo<'a>() -> impl MyTrait<'a> { ... }` desugars to
// `fn foo<'a>() -> MyAnonTy<'a> { ... }`
// ^ ^this gets resolved in the current scope
for lifetime in lifetimes {
if let hir::GenericArg::Lifetime(lifetime) = lifetime {
self.visit_lifetime(lifetime);
// Check for predicates like `impl for<'a> Trait<impl OtherTrait<'a>>`
// and ban them. Type variables instantiated inside binders aren't
// well-supported at the moment, so this doesn't work.
// In the future, this should be fixed and this error should be removed.
let def = self.map.defs.get(&lifetime.hir_id).cloned();
if let Some(Region::LateBound(_, def_id, _)) = def {
if let Some(hir_id) = self.tcx.hir().as_local_hir_id(def_id) {
// Ensure that the parent of the def is an item, not HRTB
let parent_id = self.tcx.hir().get_parent_node(hir_id);
let parent_impl_id = hir::ImplItemId { hir_id: parent_id };
let parent_trait_id = hir::TraitItemId { hir_id: parent_id };
let krate = self.tcx.hir().forest.krate();
if !(krate.items.contains_key(&parent_id)
|| krate.impl_items.contains_key(&parent_impl_id)
|| krate.trait_items.contains_key(&parent_trait_id))
{
span_err!(
self.tcx.sess,
lifetime.span,
E0657,
"`impl Trait` can only capture lifetimes \
bound at the fn or impl level"
);
self.uninsert_lifetime_on_error(lifetime, def.unwrap());
}
}
}
}
}
// We want to start our early-bound indices at the end of the parent scope,
// not including any parent `impl Trait`s.
let mut index = self.next_early_index_for_opaque_type();
debug!("visit_ty: index = {}", index);
let mut elision = None;
let mut lifetimes = FxHashMap::default();
let mut non_lifetime_count = 0;
for param in &generics.params {
match param.kind {
GenericParamKind::Lifetime { .. } => {
let (name, reg) = Region::early(&self.tcx.hir(), &mut index, ¶m);
let def_id = if let Region::EarlyBound(_, def_id, _) = reg {
def_id
} else {
bug!();
};
if let hir::ParamName::Plain(param_name) = name {
if param_name.name == kw::UnderscoreLifetime {
// Pick the elided lifetime "definition" if one exists
// and use it to make an elision scope.
self.lifetime_uses.insert(def_id.clone(), LifetimeUseSet::Many);
elision = Some(reg);
} else {
lifetimes.insert(name, reg);
}
} else {
self.lifetime_uses.insert(def_id.clone(), LifetimeUseSet::Many);
lifetimes.insert(name, reg);
}
}
GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
non_lifetime_count += 1;
}
}
}
let next_early_index = index + non_lifetime_count;
if let Some(elision_region) = elision {
let scope =
Scope::Elision { elide: Elide::Exact(elision_region), s: self.scope };
self.with(scope, |_old_scope, this| {
let scope = Scope::Binder {
lifetimes,
next_early_index,
s: this.scope,
track_lifetime_uses: true,
opaque_type_parent: false,
};
this.with(scope, |_old_scope, this| {
this.visit_generics(generics);
for bound in bounds {
this.visit_param_bound(bound);
}
});
});
} else {
let scope = Scope::Binder {
lifetimes,
next_early_index,
s: self.scope,
track_lifetime_uses: true,
opaque_type_parent: false,
};
self.with(scope, |_old_scope, this| {
this.visit_generics(generics);
for bound in bounds {
this.visit_param_bound(bound);
}
});
}
}
_ => intravisit::walk_ty(self, ty),
}
}
fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
use self::hir::TraitItemKind::*;
match trait_item.kind {
Method(ref sig, _) => {
let tcx = self.tcx;
self.visit_early_late(
Some(tcx.hir().get_parent_item(trait_item.hir_id)),
&sig.decl,
&trait_item.generics,
|this| intravisit::walk_trait_item(this, trait_item),
);
}
Type(ref bounds, ref ty) => {
let generics = &trait_item.generics;
let mut index = self.next_early_index();
debug!("visit_ty: index = {}", index);
let mut non_lifetime_count = 0;
let lifetimes = generics
.params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
Some(Region::early(&self.tcx.hir(), &mut index, param))
}
GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
non_lifetime_count += 1;
None
}
})
.collect();
let scope = Scope::Binder {
lifetimes,
next_early_index: index + non_lifetime_count,
s: self.scope,
track_lifetime_uses: true,
opaque_type_parent: true,
};
self.with(scope, |_old_scope, this| {
this.visit_generics(generics);
for bound in bounds {
this.visit_param_bound(bound);
}
if let Some(ty) = ty {
this.visit_ty(ty);
}
});
}
Const(_, _) => {
// Only methods and types support generics.
assert!(trait_item.generics.params.is_empty());
intravisit::walk_trait_item(self, trait_item);
}
}
}
fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
use self::hir::ImplItemKind::*;
match impl_item.kind {
Method(ref sig, _) => {
let tcx = self.tcx;
self.visit_early_late(
Some(tcx.hir().get_parent_item(impl_item.hir_id)),
&sig.decl,
&impl_item.generics,
|this| intravisit::walk_impl_item(this, impl_item),
)
}
TyAlias(ref ty) => {
let generics = &impl_item.generics;
let mut index = self.next_early_index();
let mut non_lifetime_count = 0;
debug!("visit_ty: index = {}", index);
let lifetimes = generics
.params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
Some(Region::early(&self.tcx.hir(), &mut index, param))
}
GenericParamKind::Const { .. } | GenericParamKind::Type { .. } => {
non_lifetime_count += 1;
None
}
})
.collect();
let scope = Scope::Binder {
lifetimes,
next_early_index: index + non_lifetime_count,
s: self.scope,
track_lifetime_uses: true,
opaque_type_parent: true,
};
self.with(scope, |_old_scope, this| {
this.visit_generics(generics);
this.visit_ty(ty);
});
}
OpaqueTy(ref bounds) => {
let generics = &impl_item.generics;
let mut index = self.next_early_index();
let mut next_early_index = index;
debug!("visit_ty: index = {}", index);
let lifetimes = generics
.params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
Some(Region::early(&self.tcx.hir(), &mut index, param))
}
GenericParamKind::Type { .. } => {
next_early_index += 1;
None
}
GenericParamKind::Const { .. } => {
next_early_index += 1;
None
}
})
.collect();
let scope = Scope::Binder {
lifetimes,
next_early_index,
s: self.scope,
track_lifetime_uses: true,
opaque_type_parent: true,
};
self.with(scope, |_old_scope, this| {
this.visit_generics(generics);
for bound in bounds {
this.visit_param_bound(bound);
}
});
}
Const(_, _) => {
// Only methods and types support generics.
assert!(impl_item.generics.params.is_empty());
intravisit::walk_impl_item(self, impl_item);
}
}
}
fn visit_lifetime(&mut self, lifetime_ref: &'tcx hir::Lifetime) {
debug!("visit_lifetime(lifetime_ref={:?})", lifetime_ref);
if lifetime_ref.is_elided() {
self.resolve_elided_lifetimes(vec![lifetime_ref]);
return;
}
if lifetime_ref.is_static() {
self.insert_lifetime(lifetime_ref, Region::Static);
return;
}
self.resolve_lifetime_ref(lifetime_ref);
}
fn visit_path(&mut self, path: &'tcx hir::Path, _: hir::HirId) {
for (i, segment) in path.segments.iter().enumerate() {
let depth = path.segments.len() - i - 1;
if let Some(ref args) = segment.args {
self.visit_segment_args(path.res, depth, args);
}
}
}
fn visit_fn_decl(&mut self, fd: &'tcx hir::FnDecl) {
let output = match fd.output {
hir::DefaultReturn(_) => None,
hir::Return(ref ty) => Some(&**ty),
};
self.visit_fn_like_elision(&fd.inputs, output);
}
fn visit_generics(&mut self, generics: &'tcx hir::Generics) {
check_mixed_explicit_and_in_band_defs(self.tcx, &generics.params);
for param in &generics.params {
match param.kind {
GenericParamKind::Lifetime { .. } => {}
GenericParamKind::Type { ref default, .. } => {
walk_list!(self, visit_param_bound, ¶m.bounds);
if let Some(ref ty) = default {
self.visit_ty(&ty);
}
}
GenericParamKind::Const { ref ty, .. } => {
walk_list!(self, visit_param_bound, ¶m.bounds);
self.visit_ty(&ty);
}
}
}
for predicate in &generics.where_clause.predicates {
match predicate {
&hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
ref bounded_ty,
ref bounds,
ref bound_generic_params,
..
}) => {
let lifetimes: FxHashMap<_, _> = bound_generic_params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
Some(Region::late(&self.tcx.hir(), param))
}
_ => None,
})
.collect();
if !lifetimes.is_empty() {
self.trait_ref_hack = true;
let next_early_index = self.next_early_index();
let scope = Scope::Binder {
lifetimes,
s: self.scope,
next_early_index,
track_lifetime_uses: true,
opaque_type_parent: false,
};
let result = self.with(scope, |old_scope, this| {
this.check_lifetime_params(old_scope, &bound_generic_params);
this.visit_ty(&bounded_ty);
walk_list!(this, visit_param_bound, bounds);
});
self.trait_ref_hack = false;
result
} else {
self.visit_ty(&bounded_ty);
walk_list!(self, visit_param_bound, bounds);
}
}
&hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate {
ref lifetime,
ref bounds,
..
}) => {
self.visit_lifetime(lifetime);
walk_list!(self, visit_param_bound, bounds);
}
&hir::WherePredicate::EqPredicate(hir::WhereEqPredicate {
ref lhs_ty,
ref rhs_ty,
..
}) => {
self.visit_ty(lhs_ty);
self.visit_ty(rhs_ty);
}
}
}
}
fn visit_poly_trait_ref(
&mut self,
trait_ref: &'tcx hir::PolyTraitRef,
_modifier: hir::TraitBoundModifier,
) {
debug!("visit_poly_trait_ref(trait_ref={:?})", trait_ref);
if !self.trait_ref_hack
|| trait_ref.bound_generic_params.iter().any(|param| match param.kind {
GenericParamKind::Lifetime { .. } => true,
_ => false,
})
{
if self.trait_ref_hack {
span_err!(
self.tcx.sess,
trait_ref.span,
E0316,
"nested quantification of lifetimes"
);
}
let next_early_index = self.next_early_index();
let scope = Scope::Binder {
lifetimes: trait_ref
.bound_generic_params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
Some(Region::late(&self.tcx.hir(), param))
}
_ => None,
})
.collect(),
s: self.scope,
next_early_index,
track_lifetime_uses: true,
opaque_type_parent: false,
};
self.with(scope, |old_scope, this| {
this.check_lifetime_params(old_scope, &trait_ref.bound_generic_params);
walk_list!(this, visit_generic_param, &trait_ref.bound_generic_params);
this.visit_trait_ref(&trait_ref.trait_ref)
})
} else {
self.visit_trait_ref(&trait_ref.trait_ref)
}
}
}
#[derive(Copy, Clone, PartialEq)]
enum ShadowKind {
Label,
Lifetime,
}
struct Original {
kind: ShadowKind,
span: Span,
}
struct Shadower {
kind: ShadowKind,
span: Span,
}
fn original_label(span: Span) -> Original {
Original { kind: ShadowKind::Label, span: span }
}
fn shadower_label(span: Span) -> Shadower {
Shadower { kind: ShadowKind::Label, span: span }
}
fn original_lifetime(span: Span) -> Original {
Original { kind: ShadowKind::Lifetime, span: span }
}
fn shadower_lifetime(param: &hir::GenericParam) -> Shadower {
Shadower { kind: ShadowKind::Lifetime, span: param.span }
}
impl ShadowKind {
fn desc(&self) -> &'static str {
match *self {
ShadowKind::Label => "label",
ShadowKind::Lifetime => "lifetime",
}
}
}
fn check_mixed_explicit_and_in_band_defs(tcx: TyCtxt<'_>, params: &P<[hir::GenericParam]>) {
let lifetime_params: Vec<_> = params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { kind, .. } => Some((kind, param.span)),
_ => None,
})
.collect();
let explicit = lifetime_params.iter().find(|(kind, _)| *kind == LifetimeParamKind::Explicit);
let in_band = lifetime_params.iter().find(|(kind, _)| *kind == LifetimeParamKind::InBand);
if let (Some((_, explicit_span)), Some((_, in_band_span))) = (explicit, in_band) {
struct_span_err!(
tcx.sess,
*in_band_span,
E0688,
"cannot mix in-band and explicit lifetime definitions"
)
.span_label(*in_band_span, "in-band lifetime definition here")
.span_label(*explicit_span, "explicit lifetime definition here")
.emit();
}
}
fn signal_shadowing_problem(tcx: TyCtxt<'_>, name: ast::Name, orig: Original, shadower: Shadower) {
let mut err = if let (ShadowKind::Lifetime, ShadowKind::Lifetime) = (orig.kind, shadower.kind) {
// lifetime/lifetime shadowing is an error
struct_span_err!(
tcx.sess,
shadower.span,
E0496,
"{} name `{}` shadows a \
{} name that is already in scope",
shadower.kind.desc(),
name,
orig.kind.desc()
)
} else {
// shadowing involving a label is only a warning, due to issues with
// labels and lifetimes not being macro-hygienic.
tcx.sess.struct_span_warn(
shadower.span,
&format!(
"{} name `{}` shadows a \
{} name that is already in scope",
shadower.kind.desc(),
name,
orig.kind.desc()
),
)
};
err.span_label(orig.span, "first declared here");
err.span_label(shadower.span, format!("lifetime {} already in scope", name));
err.emit();
}
// Adds all labels in `b` to `ctxt.labels_in_fn`, signalling a warning
// if one of the label shadows a lifetime or another label.
fn extract_labels(ctxt: &mut LifetimeContext<'_, '_>, body: &hir::Body<'_>) {
struct GatherLabels<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
scope: ScopeRef<'a>,
labels_in_fn: &'a mut Vec<ast::Ident>,
}
let mut gather =
GatherLabels { tcx: ctxt.tcx, scope: ctxt.scope, labels_in_fn: &mut ctxt.labels_in_fn };
gather.visit_body(body);
impl<'v, 'a, 'tcx> Visitor<'v> for GatherLabels<'a, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> {
NestedVisitorMap::None
}
fn visit_expr(&mut self, ex: &hir::Expr<'_>) {
if let Some(label) = expression_label(ex) {
for prior_label in &self.labels_in_fn[..] {
// FIXME (#24278): non-hygienic comparison
if label.name == prior_label.name {
signal_shadowing_problem(
self.tcx,
label.name,
original_label(prior_label.span),
shadower_label(label.span),
);
}
}
check_if_label_shadows_lifetime(self.tcx, self.scope, label);
self.labels_in_fn.push(label);
}
intravisit::walk_expr(self, ex)
}
}
fn expression_label(ex: &hir::Expr<'_>) -> Option<ast::Ident> {
if let hir::ExprKind::Loop(_, Some(label), _) = ex.kind { Some(label.ident) } else { None }
}
fn check_if_label_shadows_lifetime(
tcx: TyCtxt<'_>,
mut scope: ScopeRef<'_>,
label: ast::Ident,
) {
loop {
match *scope {
Scope::Body { s, .. }
| Scope::Elision { s, .. }
| Scope::ObjectLifetimeDefault { s, .. } => {
scope = s;
}
Scope::Root => {
return;
}
Scope::Binder { ref lifetimes, s, .. } => {
// FIXME (#24278): non-hygienic comparison
if let Some(def) = lifetimes.get(&hir::ParamName::Plain(label.modern())) {
let hir_id = tcx.hir().as_local_hir_id(def.id().unwrap()).unwrap();
signal_shadowing_problem(
tcx,
label.name,
original_lifetime(tcx.hir().span(hir_id)),
shadower_label(label.span),
);
return;
}
scope = s;
}
}
}
}
}
fn compute_object_lifetime_defaults(tcx: TyCtxt<'_>) -> HirIdMap<Vec<ObjectLifetimeDefault>> {
let mut map = HirIdMap::default();
for item in tcx.hir().krate().items.values() {
match item.kind {
hir::ItemKind::Struct(_, ref generics)
| hir::ItemKind::Union(_, ref generics)
| hir::ItemKind::Enum(_, ref generics)
| hir::ItemKind::OpaqueTy(hir::OpaqueTy {
ref generics, impl_trait_fn: None, ..
})
| hir::ItemKind::TyAlias(_, ref generics)
| hir::ItemKind::Trait(_, _, ref generics, ..) => {
let result = object_lifetime_defaults_for_item(tcx, generics);
// Debugging aid.
if attr::contains_name(&item.attrs, sym::rustc_object_lifetime_default) {
let object_lifetime_default_reprs: String = result
.iter()
.map(|set| match *set {
Set1::Empty => "BaseDefault".into(),
Set1::One(Region::Static) => "'static".into(),
Set1::One(Region::EarlyBound(mut i, _, _)) => generics
.params
.iter()
.find_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
if i == 0 {
return Some(param.name.ident().to_string().into());
}
i -= 1;
None
}
_ => None,
})
.unwrap(),
Set1::One(_) => bug!(),
Set1::Many => "Ambiguous".into(),
})
.collect::<Vec<Cow<'static, str>>>()
.join(",");
tcx.sess.span_err(item.span, &object_lifetime_default_reprs);
}
map.insert(item.hir_id, result);
}
_ => {}
}
}
map
}
/// Scan the bounds and where-clauses on parameters to extract bounds
/// of the form `T:'a` so as to determine the `ObjectLifetimeDefault`
/// for each type parameter.
fn object_lifetime_defaults_for_item(
tcx: TyCtxt<'_>,
generics: &hir::Generics,
) -> Vec<ObjectLifetimeDefault> {
fn add_bounds(set: &mut Set1<hir::LifetimeName>, bounds: &[hir::GenericBound]) {
for bound in bounds {
if let hir::GenericBound::Outlives(ref lifetime) = *bound {
set.insert(lifetime.name.modern());
}
}
}
generics
.params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => None,
GenericParamKind::Type { .. } => {
let mut set = Set1::Empty;
add_bounds(&mut set, ¶m.bounds);
let param_def_id = tcx.hir().local_def_id(param.hir_id);
for predicate in &generics.where_clause.predicates {
// Look for `type: ...` where clauses.
let data = match *predicate {
hir::WherePredicate::BoundPredicate(ref data) => data,
_ => continue,
};
// Ignore `for<'a> type: ...` as they can change what
// lifetimes mean (although we could "just" handle it).
if !data.bound_generic_params.is_empty() {
continue;
}
let res = match data.bounded_ty.kind {
hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) => path.res,
_ => continue,
};
if res == Res::Def(DefKind::TyParam, param_def_id) {
add_bounds(&mut set, &data.bounds);
}
}
Some(match set {
Set1::Empty => Set1::Empty,
Set1::One(name) => {
if name == hir::LifetimeName::Static {
Set1::One(Region::Static)
} else {
generics
.params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => Some((
param.hir_id,
hir::LifetimeName::Param(param.name),
LifetimeDefOrigin::from_param(param),
)),
_ => None,
})
.enumerate()
.find(|&(_, (_, lt_name, _))| lt_name == name)
.map_or(Set1::Many, |(i, (id, _, origin))| {
let def_id = tcx.hir().local_def_id(id);
Set1::One(Region::EarlyBound(i as u32, def_id, origin))
})
}
}
Set1::Many => Set1::Many,
})
}
GenericParamKind::Const { .. } => {
// Generic consts don't impose any constraints.
None
}
})
.collect()
}
impl<'a, 'tcx> LifetimeContext<'a, 'tcx> {
// FIXME(#37666) this works around a limitation in the region inferencer
fn hack<F>(&mut self, f: F)
where
F: for<'b> FnOnce(&mut LifetimeContext<'b, 'tcx>),
{
f(self)
}
fn with<F>(&mut self, wrap_scope: Scope<'_>, f: F)
where
F: for<'b> FnOnce(ScopeRef<'_>, &mut LifetimeContext<'b, 'tcx>),
{
let LifetimeContext { tcx, map, lifetime_uses, .. } = self;
let labels_in_fn = take(&mut self.labels_in_fn);
let xcrate_object_lifetime_defaults = take(&mut self.xcrate_object_lifetime_defaults);
let mut this = LifetimeContext {
tcx: *tcx,
map: map,
scope: &wrap_scope,
trait_ref_hack: self.trait_ref_hack,
is_in_fn_syntax: self.is_in_fn_syntax,
labels_in_fn,
xcrate_object_lifetime_defaults,
lifetime_uses: lifetime_uses,
};
debug!("entering scope {:?}", this.scope);
f(self.scope, &mut this);
this.check_uses_for_lifetimes_defined_by_scope();
debug!("exiting scope {:?}", this.scope);
self.labels_in_fn = this.labels_in_fn;
self.xcrate_object_lifetime_defaults = this.xcrate_object_lifetime_defaults;
}
/// helper method to determine the span to remove when suggesting the
/// deletion of a lifetime
fn lifetime_deletion_span(&self, name: ast::Ident, generics: &hir::Generics) -> Option<Span> {
generics.params.iter().enumerate().find_map(|(i, param)| {
if param.name.ident() == name {
let mut in_band = false;
if let hir::GenericParamKind::Lifetime { kind } = param.kind {
if let hir::LifetimeParamKind::InBand = kind {
in_band = true;
}
}
if in_band {
Some(param.span)
} else {
if generics.params.len() == 1 {
// if sole lifetime, remove the entire `<>` brackets
Some(generics.span)
} else {
// if removing within `<>` brackets, we also want to
// delete a leading or trailing comma as appropriate
if i >= generics.params.len() - 1 {
Some(generics.params[i - 1].span.shrink_to_hi().to(param.span))
} else {
Some(param.span.to(generics.params[i + 1].span.shrink_to_lo()))
}
}
}
} else {
None
}
})
}
// helper method to issue suggestions from `fn rah<'a>(&'a T)` to `fn rah(&T)`
// or from `fn rah<'a>(T<'a>)` to `fn rah(T<'_>)`
fn suggest_eliding_single_use_lifetime(
&self,
err: &mut DiagnosticBuilder<'_>,
def_id: DefId,
lifetime: &hir::Lifetime,
) {
let name = lifetime.name.ident();
let mut remove_decl = None;
if let Some(parent_def_id) = self.tcx.parent(def_id) {
if let Some(generics) = self.tcx.hir().get_generics(parent_def_id) {
remove_decl = self.lifetime_deletion_span(name, generics);
}
}
let mut remove_use = None;
let mut elide_use = None;
let mut find_arg_use_span = |inputs: &hir::HirVec<hir::Ty>| {
for input in inputs {
match input.kind {
hir::TyKind::Rptr(lt, _) => {
if lt.name.ident() == name {
// include the trailing whitespace between the lifetime and type names
let lt_through_ty_span = lifetime.span.to(input.span.shrink_to_hi());
remove_use = Some(
self.tcx
.sess
.source_map()
.span_until_non_whitespace(lt_through_ty_span),
);
break;
}
}
hir::TyKind::Path(ref qpath) => {
if let QPath::Resolved(_, path) = qpath {
let last_segment = &path.segments[path.segments.len() - 1];
let generics = last_segment.generic_args();
for arg in generics.args.iter() {
if let GenericArg::Lifetime(lt) = arg {
if lt.name.ident() == name {
elide_use = Some(lt.span);
break;
}
}
}
break;
}
}
_ => {}
}
}
};
if let Node::Lifetime(hir_lifetime) = self.tcx.hir().get(lifetime.hir_id) {
if let Some(parent) =
self.tcx.hir().find(self.tcx.hir().get_parent_item(hir_lifetime.hir_id))
{
match parent {
Node::Item(item) => {
if let hir::ItemKind::Fn(sig, _, _) = &item.kind {
find_arg_use_span(&sig.decl.inputs);
}
}
Node::ImplItem(impl_item) => {
if let hir::ImplItemKind::Method(sig, _) = &impl_item.kind {
find_arg_use_span(&sig.decl.inputs);
}
}
_ => {}
}
}
}
let msg = "elide the single-use lifetime";
match (remove_decl, remove_use, elide_use) {
(Some(decl_span), Some(use_span), None) => {
// if both declaration and use deletion spans start at the same
// place ("start at" because the latter includes trailing
// whitespace), then this is an in-band lifetime
if decl_span.shrink_to_lo() == use_span.shrink_to_lo() {
err.span_suggestion(
use_span,
msg,
String::new(),
Applicability::MachineApplicable,
);
} else {
err.multipart_suggestion(
msg,
vec![(decl_span, String::new()), (use_span, String::new())],
Applicability::MachineApplicable,
);
}
}
(Some(decl_span), None, Some(use_span)) => {
err.multipart_suggestion(
msg,
vec![(decl_span, String::new()), (use_span, "'_".to_owned())],
Applicability::MachineApplicable,
);
}
_ => {}
}
}
fn check_uses_for_lifetimes_defined_by_scope(&mut self) {
let defined_by = match self.scope {
Scope::Binder { lifetimes, .. } => lifetimes,
_ => {
debug!("check_uses_for_lifetimes_defined_by_scope: not in a binder scope");
return;
}
};
let mut def_ids: Vec<_> = defined_by
.values()
.flat_map(|region| match region {
Region::EarlyBound(_, def_id, _)
| Region::LateBound(_, def_id, _)
| Region::Free(_, def_id) => Some(*def_id),
Region::LateBoundAnon(..) | Region::Static => None,
})
.collect();
// ensure that we issue lints in a repeatable order
def_ids.sort_by_cached_key(|&def_id| self.tcx.def_path_hash(def_id));
for def_id in def_ids {
debug!("check_uses_for_lifetimes_defined_by_scope: def_id = {:?}", def_id);
let lifetimeuseset = self.lifetime_uses.remove(&def_id);
debug!(
"check_uses_for_lifetimes_defined_by_scope: lifetimeuseset = {:?}",
lifetimeuseset
);
match lifetimeuseset {
Some(LifetimeUseSet::One(lifetime)) => {
let hir_id = self.tcx.hir().as_local_hir_id(def_id).unwrap();
debug!("hir id first={:?}", hir_id);
if let Some((id, span, name)) = match self.tcx.hir().get(hir_id) {
Node::Lifetime(hir_lifetime) => Some((
hir_lifetime.hir_id,
hir_lifetime.span,
hir_lifetime.name.ident(),
)),
Node::GenericParam(param) => {
Some((param.hir_id, param.span, param.name.ident()))
}
_ => None,
} {
debug!("id = {:?} span = {:?} name = {:?}", id, span, name);
if name.name == kw::UnderscoreLifetime {
continue;
}
if let Some(parent_def_id) = self.tcx.parent(def_id) {
if let Some(parent_hir_id) =
self.tcx.hir().as_local_hir_id(parent_def_id)
{
// lifetimes in `derive` expansions don't count (Issue #53738)
if self
.tcx
.hir()
.attrs(parent_hir_id)
.iter()
.any(|attr| attr.check_name(sym::automatically_derived))
{
continue;
}
}
}
let mut err = self.tcx.struct_span_lint_hir(
lint::builtin::SINGLE_USE_LIFETIMES,
id,
span,
&format!("lifetime parameter `{}` only used once", name),
);
if span == lifetime.span {
// spans are the same for in-band lifetime declarations
err.span_label(span, "this lifetime is only used here");
} else {
err.span_label(span, "this lifetime...");
err.span_label(lifetime.span, "...is used only here");
}
self.suggest_eliding_single_use_lifetime(&mut err, def_id, lifetime);
err.emit();
}
}
Some(LifetimeUseSet::Many) => {
debug!("not one use lifetime");
}
None => {
let hir_id = self.tcx.hir().as_local_hir_id(def_id).unwrap();
if let Some((id, span, name)) = match self.tcx.hir().get(hir_id) {
Node::Lifetime(hir_lifetime) => Some((
hir_lifetime.hir_id,
hir_lifetime.span,
hir_lifetime.name.ident(),
)),
Node::GenericParam(param) => {
Some((param.hir_id, param.span, param.name.ident()))
}
_ => None,
} {
debug!("id ={:?} span = {:?} name = {:?}", id, span, name);
let mut err = self.tcx.struct_span_lint_hir(
lint::builtin::UNUSED_LIFETIMES,
id,
span,
&format!("lifetime parameter `{}` never used", name),
);
if let Some(parent_def_id) = self.tcx.parent(def_id) {
if let Some(generics) = self.tcx.hir().get_generics(parent_def_id) {
let unused_lt_span = self.lifetime_deletion_span(name, generics);
if let Some(span) = unused_lt_span {
err.span_suggestion(
span,
"elide the unused lifetime",
String::new(),
Applicability::MachineApplicable,
);
}
}
}
err.emit();
}
}
}
}
}
/// Visits self by adding a scope and handling recursive walk over the contents with `walk`.
///
/// Handles visiting fns and methods. These are a bit complicated because we must distinguish
/// early- vs late-bound lifetime parameters. We do this by checking which lifetimes appear
/// within type bounds; those are early bound lifetimes, and the rest are late bound.
///
/// For example:
///
/// fn foo<'a,'b,'c,T:Trait<'b>>(...)
///
/// Here `'a` and `'c` are late bound but `'b` is early bound. Note that early- and late-bound
/// lifetimes may be interspersed together.
///
/// If early bound lifetimes are present, we separate them into their own list (and likewise
/// for late bound). They will be numbered sequentially, starting from the lowest index that is
/// already in scope (for a fn item, that will be 0, but for a method it might not be). Late
/// bound lifetimes are resolved by name and associated with a binder ID (`binder_id`), so the
/// ordering is not important there.
fn visit_early_late<F>(
&mut self,
parent_id: Option<hir::HirId>,
decl: &'tcx hir::FnDecl,
generics: &'tcx hir::Generics,
walk: F,
) where
F: for<'b, 'c> FnOnce(&'b mut LifetimeContext<'c, 'tcx>),
{
insert_late_bound_lifetimes(self.map, decl, generics);
// Find the start of nested early scopes, e.g., in methods.
let mut index = 0;
if let Some(parent_id) = parent_id {
let parent = self.tcx.hir().expect_item(parent_id);
if sub_items_have_self_param(&parent.kind) {
index += 1; // Self comes before lifetimes
}
match parent.kind {
hir::ItemKind::Trait(_, _, ref generics, ..)
| hir::ItemKind::Impl(_, _, _, ref generics, ..) => {
index += generics.params.len() as u32;
}
_ => {}
}
}
let mut non_lifetime_count = 0;
let lifetimes = generics
.params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
if self.map.late_bound.contains(¶m.hir_id) {
Some(Region::late(&self.tcx.hir(), param))
} else {
Some(Region::early(&self.tcx.hir(), &mut index, param))
}
}
GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
non_lifetime_count += 1;
None
}
})
.collect();
let next_early_index = index + non_lifetime_count;
let scope = Scope::Binder {
lifetimes,
next_early_index,
s: self.scope,
opaque_type_parent: true,
track_lifetime_uses: false,
};
self.with(scope, move |old_scope, this| {
this.check_lifetime_params(old_scope, &generics.params);
this.hack(walk); // FIXME(#37666) workaround in place of `walk(this)`
});
}
fn next_early_index_helper(&self, only_opaque_type_parent: bool) -> u32 {
let mut scope = self.scope;
loop {
match *scope {
Scope::Root => return 0,
Scope::Binder { next_early_index, opaque_type_parent, .. }
if (!only_opaque_type_parent || opaque_type_parent) =>
{
return next_early_index;
}
Scope::Binder { s, .. }
| Scope::Body { s, .. }
| Scope::Elision { s, .. }
| Scope::ObjectLifetimeDefault { s, .. } => scope = s,
}
}
}
/// Returns the next index one would use for an early-bound-region
/// if extending the current scope.
fn next_early_index(&self) -> u32 {
self.next_early_index_helper(true)
}
/// Returns the next index one would use for an `impl Trait` that
/// is being converted into an opaque type alias `impl Trait`. This will be the
/// next early index from the enclosing item, for the most
/// part. See the `opaque_type_parent` field for more info.
fn next_early_index_for_opaque_type(&self) -> u32 {
self.next_early_index_helper(false)
}
fn resolve_lifetime_ref(&mut self, lifetime_ref: &'tcx hir::Lifetime) {
debug!("resolve_lifetime_ref(lifetime_ref={:?})", lifetime_ref);
// If we've already reported an error, just ignore `lifetime_ref`.
if let LifetimeName::Error = lifetime_ref.name {
return;
}
// Walk up the scope chain, tracking the number of fn scopes
// that we pass through, until we find a lifetime with the
// given name or we run out of scopes.
// search.
let mut late_depth = 0;
let mut scope = self.scope;
let mut outermost_body = None;
let result = loop {
match *scope {
Scope::Body { id, s } => {
outermost_body = Some(id);
scope = s;
}
Scope::Root => {
break None;
}
Scope::Binder { ref lifetimes, s, .. } => {
match lifetime_ref.name {
LifetimeName::Param(param_name) => {
if let Some(&def) = lifetimes.get(¶m_name.modern()) {
break Some(def.shifted(late_depth));
}
}
_ => bug!("expected LifetimeName::Param"),
}
late_depth += 1;
scope = s;
}
Scope::Elision { s, .. } | Scope::ObjectLifetimeDefault { s, .. } => {
scope = s;
}
}
};
if let Some(mut def) = result {
if let Region::EarlyBound(..) = def {
// Do not free early-bound regions, only late-bound ones.
} else if let Some(body_id) = outermost_body {
let fn_id = self.tcx.hir().body_owner(body_id);
match self.tcx.hir().get(fn_id) {
Node::Item(&hir::Item { kind: hir::ItemKind::Fn(..), .. })
| Node::TraitItem(&hir::TraitItem {
kind: hir::TraitItemKind::Method(..),
..
})
| Node::ImplItem(&hir::ImplItem {
kind: hir::ImplItemKind::Method(..), ..
}) => {
let scope = self.tcx.hir().local_def_id(fn_id);
def = Region::Free(scope, def.id().unwrap());
}
_ => {}
}
}
// Check for fn-syntax conflicts with in-band lifetime definitions
if self.is_in_fn_syntax {
match def {
Region::EarlyBound(_, _, LifetimeDefOrigin::InBand)
| Region::LateBound(_, _, LifetimeDefOrigin::InBand) => {
struct_span_err!(
self.tcx.sess,
lifetime_ref.span,
E0687,
"lifetimes used in `fn` or `Fn` syntax must be \
explicitly declared using `<...>` binders"
)
.span_label(lifetime_ref.span, "in-band lifetime definition")
.emit();
}
Region::Static
| Region::EarlyBound(_, _, LifetimeDefOrigin::ExplicitOrElided)
| Region::LateBound(_, _, LifetimeDefOrigin::ExplicitOrElided)
| Region::EarlyBound(_, _, LifetimeDefOrigin::Error)
| Region::LateBound(_, _, LifetimeDefOrigin::Error)
| Region::LateBoundAnon(..)
| Region::Free(..) => {}
}
}
self.insert_lifetime(lifetime_ref, def);
} else {
struct_span_err!(
self.tcx.sess,
lifetime_ref.span,
E0261,
"use of undeclared lifetime name `{}`",
lifetime_ref
)
.span_label(lifetime_ref.span, "undeclared lifetime")
.emit();
}
}
fn visit_segment_args(&mut self, res: Res, depth: usize, generic_args: &'tcx hir::GenericArgs) {
debug!(
"visit_segment_args(res={:?}, depth={:?}, generic_args={:?})",
res, depth, generic_args,
);
if generic_args.parenthesized {
let was_in_fn_syntax = self.is_in_fn_syntax;
self.is_in_fn_syntax = true;
self.visit_fn_like_elision(generic_args.inputs(), Some(generic_args.bindings[0].ty()));
self.is_in_fn_syntax = was_in_fn_syntax;
return;
}
let mut elide_lifetimes = true;
let lifetimes = generic_args
.args
.iter()
.filter_map(|arg| match arg {
hir::GenericArg::Lifetime(lt) => {
if !lt.is_elided() {
elide_lifetimes = false;
}
Some(lt)
}
_ => None,
})
.collect();
if elide_lifetimes {
self.resolve_elided_lifetimes(lifetimes);
} else {
lifetimes.iter().for_each(|lt| self.visit_lifetime(lt));
}
// Figure out if this is a type/trait segment,
// which requires object lifetime defaults.
let parent_def_id = |this: &mut Self, def_id: DefId| {
let def_key = this.tcx.def_key(def_id);
DefId { krate: def_id.krate, index: def_key.parent.expect("missing parent") }
};
let type_def_id = match res {
Res::Def(DefKind::AssocTy, def_id) if depth == 1 => Some(parent_def_id(self, def_id)),
Res::Def(DefKind::Variant, def_id) if depth == 0 => Some(parent_def_id(self, def_id)),
Res::Def(DefKind::Struct, def_id)
| Res::Def(DefKind::Union, def_id)
| Res::Def(DefKind::Enum, def_id)
| Res::Def(DefKind::TyAlias, def_id)
| Res::Def(DefKind::Trait, def_id)
if depth == 0 =>
{
Some(def_id)
}
_ => None,
};
debug!("visit_segment_args: type_def_id={:?}", type_def_id);
// Compute a vector of defaults, one for each type parameter,
// per the rules given in RFCs 599 and 1156. Example:
//
// ```rust
// struct Foo<'a, T: 'a, U> { }
// ```
//
// If you have `Foo<'x, dyn Bar, dyn Baz>`, we want to default
// `dyn Bar` to `dyn Bar + 'x` (because of the `T: 'a` bound)
// and `dyn Baz` to `dyn Baz + 'static` (because there is no
// such bound).
//
// Therefore, we would compute `object_lifetime_defaults` to a
// vector like `['x, 'static]`. Note that the vector only
// includes type parameters.
let object_lifetime_defaults = type_def_id.map_or(vec![], |def_id| {
let in_body = {
let mut scope = self.scope;
loop {
match *scope {
Scope::Root => break false,
Scope::Body { .. } => break true,
Scope::Binder { s, .. }
| Scope::Elision { s, .. }
| Scope::ObjectLifetimeDefault { s, .. } => {
scope = s;
}
}
}
};
let map = &self.map;
let unsubst = if let Some(id) = self.tcx.hir().as_local_hir_id(def_id) {
&map.object_lifetime_defaults[&id]
} else {
let tcx = self.tcx;
self.xcrate_object_lifetime_defaults.entry(def_id).or_insert_with(|| {
tcx.generics_of(def_id)
.params
.iter()
.filter_map(|param| match param.kind {
GenericParamDefKind::Type { object_lifetime_default, .. } => {
Some(object_lifetime_default)
}
GenericParamDefKind::Lifetime | GenericParamDefKind::Const => None,
})
.collect()
})
};
debug!("visit_segment_args: unsubst={:?}", unsubst);
unsubst
.iter()
.map(|set| match *set {
Set1::Empty => {
if in_body {
None
} else {
Some(Region::Static)
}
}
Set1::One(r) => {
let lifetimes = generic_args.args.iter().filter_map(|arg| match arg {
GenericArg::Lifetime(lt) => Some(lt),
_ => None,
});
r.subst(lifetimes, map)
}
Set1::Many => None,
})
.collect()
});
debug!("visit_segment_args: object_lifetime_defaults={:?}", object_lifetime_defaults);
let mut i = 0;
for arg in &generic_args.args {
match arg {
GenericArg::Lifetime(_) => {}
GenericArg::Type(ty) => {
if let Some(<) = object_lifetime_defaults.get(i) {
let scope = Scope::ObjectLifetimeDefault { lifetime: lt, s: self.scope };
self.with(scope, |_, this| this.visit_ty(ty));
} else {
self.visit_ty(ty);
}
i += 1;
}
GenericArg::Const(ct) => {
self.visit_anon_const(&ct.value);
}
}
}
// Hack: when resolving the type `XX` in binding like `dyn
// Foo<'b, Item = XX>`, the current object-lifetime default
// would be to examine the trait `Foo` to check whether it has
// a lifetime bound declared on `Item`. e.g., if `Foo` is
// declared like so, then the default object lifetime bound in
// `XX` should be `'b`:
//
// ```rust
// trait Foo<'a> {
// type Item: 'a;
// }
// ```
//
// but if we just have `type Item;`, then it would be
// `'static`. However, we don't get all of this logic correct.
//
// Instead, we do something hacky: if there are no lifetime parameters
// to the trait, then we simply use a default object lifetime
// bound of `'static`, because there is no other possibility. On the other hand,
// if there ARE lifetime parameters, then we require the user to give an
// explicit bound for now.
//
// This is intended to leave room for us to implement the
// correct behavior in the future.
let has_lifetime_parameter = generic_args.args.iter().any(|arg| match arg {
GenericArg::Lifetime(_) => true,
_ => false,
});
// Resolve lifetimes found in the type `XX` from `Item = XX` bindings.
for b in &generic_args.bindings {
let scope = Scope::ObjectLifetimeDefault {
lifetime: if has_lifetime_parameter { None } else { Some(Region::Static) },
s: self.scope,
};
self.with(scope, |_, this| this.visit_assoc_type_binding(b));
}
}
fn visit_fn_like_elision(&mut self, inputs: &'tcx [hir::Ty], output: Option<&'tcx hir::Ty>) {
debug!("visit_fn_like_elision: enter");
let mut arg_elide = Elide::FreshLateAnon(Cell::new(0));
let arg_scope = Scope::Elision { elide: arg_elide.clone(), s: self.scope };
self.with(arg_scope, |_, this| {
for input in inputs {
this.visit_ty(input);
}
match *this.scope {
Scope::Elision { ref elide, .. } => {
arg_elide = elide.clone();
}
_ => bug!(),
}
});
let output = match output {
Some(ty) => ty,
None => return,
};
debug!("visit_fn_like_elision: determine output");
// Figure out if there's a body we can get argument names from,
// and whether there's a `self` argument (treated specially).
let mut assoc_item_kind = None;
let mut impl_self = None;
let parent = self.tcx.hir().get_parent_node(output.hir_id);
let body = match self.tcx.hir().get(parent) {
// `fn` definitions and methods.
Node::Item(&hir::Item { kind: hir::ItemKind::Fn(.., body), .. }) => Some(body),
Node::TraitItem(&hir::TraitItem {
kind: hir::TraitItemKind::Method(_, ref m), ..
}) => {
if let hir::ItemKind::Trait(.., ref trait_items) =
self.tcx.hir().expect_item(self.tcx.hir().get_parent_item(parent)).kind
{
assoc_item_kind =
trait_items.iter().find(|ti| ti.id.hir_id == parent).map(|ti| ti.kind);
}
match *m {
hir::TraitMethod::Required(_) => None,
hir::TraitMethod::Provided(body) => Some(body),
}
}
Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Method(_, body), .. }) => {
if let hir::ItemKind::Impl(.., ref self_ty, ref impl_items) =
self.tcx.hir().expect_item(self.tcx.hir().get_parent_item(parent)).kind
{
impl_self = Some(self_ty);
assoc_item_kind =
impl_items.iter().find(|ii| ii.id.hir_id == parent).map(|ii| ii.kind);
}
Some(body)
}
// Foreign functions, `fn(...) -> R` and `Trait(...) -> R` (both types and bounds).
Node::ForeignItem(_) | Node::Ty(_) | Node::TraitRef(_) => None,
// Everything else (only closures?) doesn't
// actually enjoy elision in return types.
_ => {
self.visit_ty(output);
return;
}
};
let has_self = match assoc_item_kind {
Some(hir::AssocItemKind::Method { has_self }) => has_self,
_ => false,
};
// In accordance with the rules for lifetime elision, we can determine
// what region to use for elision in the output type in two ways.
// First (determined here), if `self` is by-reference, then the
// implied output region is the region of the self parameter.
if has_self {
struct SelfVisitor<'a> {
map: &'a NamedRegionMap,
impl_self: Option<&'a hir::TyKind>,
lifetime: Set1<Region>,
}
impl SelfVisitor<'_> {
// Look for `self: &'a Self` - also desugared from `&'a self`,
// and if that matches, use it for elision and return early.
fn is_self_ty(&self, res: Res) -> bool {
if let Res::SelfTy(..) = res {
return true;
}
// Can't always rely on literal (or implied) `Self` due
// to the way elision rules were originally specified.
if let Some(&hir::TyKind::Path(hir::QPath::Resolved(None, ref path))) =
self.impl_self
{
match path.res {
// Whitelist the types that unambiguously always
// result in the same type constructor being used
// (it can't differ between `Self` and `self`).
Res::Def(DefKind::Struct, _)
| Res::Def(DefKind::Union, _)
| Res::Def(DefKind::Enum, _)
| Res::PrimTy(_) => return res == path.res,
_ => {}
}
}
false
}
}
impl<'a> Visitor<'a> for SelfVisitor<'a> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'a> {
NestedVisitorMap::None
}
fn visit_ty(&mut self, ty: &'a hir::Ty) {
if let hir::TyKind::Rptr(lifetime_ref, ref mt) = ty.kind {
if let hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) = mt.ty.kind
{
if self.is_self_ty(path.res) {
if let Some(lifetime) = self.map.defs.get(&lifetime_ref.hir_id) {
self.lifetime.insert(*lifetime);
}
}
}
}
intravisit::walk_ty(self, ty)
}
}
let mut visitor = SelfVisitor {
map: self.map,
impl_self: impl_self.map(|ty| &ty.kind),
lifetime: Set1::Empty,
};
visitor.visit_ty(&inputs[0]);
if let Set1::One(lifetime) = visitor.lifetime {
let scope = Scope::Elision { elide: Elide::Exact(lifetime), s: self.scope };
self.with(scope, |_, this| this.visit_ty(output));
return;
}
}
// Second, if there was exactly one lifetime (either a substitution or a
// reference) in the arguments, then any anonymous regions in the output
// have that lifetime.
let mut possible_implied_output_region = None;
let mut lifetime_count = 0;
let arg_lifetimes = inputs
.iter()
.enumerate()
.skip(has_self as usize)
.map(|(i, input)| {
let mut gather = GatherLifetimes {
map: self.map,
outer_index: ty::INNERMOST,
have_bound_regions: false,
lifetimes: Default::default(),
};
gather.visit_ty(input);
lifetime_count += gather.lifetimes.len();
if lifetime_count == 1 && gather.lifetimes.len() == 1 {
// there's a chance that the unique lifetime of this
// iteration will be the appropriate lifetime for output
// parameters, so lets store it.
possible_implied_output_region = gather.lifetimes.iter().cloned().next();
}
ElisionFailureInfo {
parent: body,
index: i,
lifetime_count: gather.lifetimes.len(),
have_bound_regions: gather.have_bound_regions,
}
})
.collect();
let elide = if lifetime_count == 1 {
Elide::Exact(possible_implied_output_region.unwrap())
} else {
Elide::Error(arg_lifetimes)
};
debug!("visit_fn_like_elision: elide={:?}", elide);
let scope = Scope::Elision { elide, s: self.scope };
self.with(scope, |_, this| this.visit_ty(output));
debug!("visit_fn_like_elision: exit");
struct GatherLifetimes<'a> {
map: &'a NamedRegionMap,
outer_index: ty::DebruijnIndex,
have_bound_regions: bool,
lifetimes: FxHashSet<Region>,
}
impl<'v, 'a> Visitor<'v> for GatherLifetimes<'a> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> {
NestedVisitorMap::None
}
fn visit_ty(&mut self, ty: &hir::Ty) {
if let hir::TyKind::BareFn(_) = ty.kind {
self.outer_index.shift_in(1);
}
match ty.kind {
hir::TyKind::TraitObject(ref bounds, ref lifetime) => {
for bound in bounds {
self.visit_poly_trait_ref(bound, hir::TraitBoundModifier::None);
}
// Stay on the safe side and don't include the object
// lifetime default (which may not end up being used).
if !lifetime.is_elided() {
self.visit_lifetime(lifetime);
}
}
_ => {
intravisit::walk_ty(self, ty);
}
}
if let hir::TyKind::BareFn(_) = ty.kind {
self.outer_index.shift_out(1);
}
}
fn visit_generic_param(&mut self, param: &hir::GenericParam) {
if let hir::GenericParamKind::Lifetime { .. } = param.kind {
// FIXME(eddyb) Do we want this? It only makes a difference
// if this `for<'a>` lifetime parameter is never used.
self.have_bound_regions = true;
}
intravisit::walk_generic_param(self, param);
}
fn visit_poly_trait_ref(
&mut self,
trait_ref: &hir::PolyTraitRef,
modifier: hir::TraitBoundModifier,
) {
self.outer_index.shift_in(1);
intravisit::walk_poly_trait_ref(self, trait_ref, modifier);
self.outer_index.shift_out(1);
}
fn visit_lifetime(&mut self, lifetime_ref: &hir::Lifetime) {
if let Some(&lifetime) = self.map.defs.get(&lifetime_ref.hir_id) {
match lifetime {
Region::LateBound(debruijn, _, _) | Region::LateBoundAnon(debruijn, _)
if debruijn < self.outer_index =>
{
self.have_bound_regions = true;
}
_ => {
self.lifetimes.insert(lifetime.shifted_out_to_binder(self.outer_index));
}
}
}
}
}
}
fn resolve_elided_lifetimes(&mut self, lifetime_refs: Vec<&'tcx hir::Lifetime>) {
debug!("resolve_elided_lifetimes(lifetime_refs={:?})", lifetime_refs);
if lifetime_refs.is_empty() {
return;
}
let span = lifetime_refs[0].span;
let mut late_depth = 0;
let mut scope = self.scope;
let mut lifetime_names = FxHashSet::default();
let error = loop {
match *scope {
// Do not assign any resolution, it will be inferred.
Scope::Body { .. } => return,
Scope::Root => break None,
Scope::Binder { s, ref lifetimes, .. } => {
// collect named lifetimes for suggestions
for name in lifetimes.keys() {
if let hir::ParamName::Plain(name) = name {
lifetime_names.insert(*name);
}
}
late_depth += 1;
scope = s;
}
Scope::Elision { ref elide, ref s, .. } => {
let lifetime = match *elide {
Elide::FreshLateAnon(ref counter) => {
for lifetime_ref in lifetime_refs {
let lifetime = Region::late_anon(counter).shifted(late_depth);
self.insert_lifetime(lifetime_ref, lifetime);
}
return;
}
Elide::Exact(l) => l.shifted(late_depth),
Elide::Error(ref e) => {
if let Scope::Binder { ref lifetimes, .. } = s {
// collect named lifetimes for suggestions
for name in lifetimes.keys() {
if let hir::ParamName::Plain(name) = name {
lifetime_names.insert(*name);
}
}
}
break Some(e);
}
};
for lifetime_ref in lifetime_refs {
self.insert_lifetime(lifetime_ref, lifetime);
}
return;
}
Scope::ObjectLifetimeDefault { s, .. } => {
scope = s;
}
}
};
let mut err = report_missing_lifetime_specifiers(self.tcx.sess, span, lifetime_refs.len());
let mut add_label = true;
if let Some(params) = error {
if lifetime_refs.len() == 1 {
add_label = add_label && self.report_elision_failure(&mut err, params, span);
}
}
if add_label {
add_missing_lifetime_specifiers_label(
&mut err,
span,
lifetime_refs.len(),
&lifetime_names,
self.tcx.sess.source_map().span_to_snippet(span).ok().as_ref().map(|s| s.as_str()),
);
}
err.emit();
}
fn suggest_lifetime(&self, db: &mut DiagnosticBuilder<'_>, span: Span, msg: &str) -> bool {
match self.tcx.sess.source_map().span_to_snippet(span) {
Ok(ref snippet) => {
let (sugg, applicability) = if snippet == "&" {
("&'static ".to_owned(), Applicability::MachineApplicable)
} else if snippet == "'_" {
("'static".to_owned(), Applicability::MachineApplicable)
} else {
(format!("{} + 'static", snippet), Applicability::MaybeIncorrect)
};
db.span_suggestion(span, msg, sugg, applicability);
false
}
Err(_) => {
db.help(msg);
true
}
}
}
fn report_elision_failure(
&mut self,
db: &mut DiagnosticBuilder<'_>,
params: &[ElisionFailureInfo],
span: Span,
) -> bool {
let mut m = String::new();
let len = params.len();
let elided_params: Vec<_> =
params.iter().cloned().filter(|info| info.lifetime_count > 0).collect();
let elided_len = elided_params.len();
for (i, info) in elided_params.into_iter().enumerate() {
let ElisionFailureInfo { parent, index, lifetime_count: n, have_bound_regions } = info;
let help_name = if let Some(ident) =
parent.and_then(|body| self.tcx.hir().body(body).params[index].pat.simple_ident())
{
format!("`{}`", ident)
} else {
format!("argument {}", index + 1)
};
m.push_str(
&(if n == 1 {
help_name
} else {
format!(
"one of {}'s {} {}lifetimes",
help_name,
n,
if have_bound_regions { "free " } else { "" }
)
})[..],
);
if elided_len == 2 && i == 0 {
m.push_str(" or ");
} else if i + 2 == elided_len {
m.push_str(", or ");
} else if i != elided_len - 1 {
m.push_str(", ");
}
}
if len == 0 {
help!(
db,
"this function's return type contains a borrowed value, but \
there is no value for it to be borrowed from"
);
self.suggest_lifetime(db, span, "consider giving it a 'static lifetime")
} else if elided_len == 0 {
help!(
db,
"this function's return type contains a borrowed value with \
an elided lifetime, but the lifetime cannot be derived from \
the arguments"
);
let msg = "consider giving it an explicit bounded or 'static lifetime";
self.suggest_lifetime(db, span, msg)
} else if elided_len == 1 {
help!(
db,
"this function's return type contains a borrowed value, but \
the signature does not say which {} it is borrowed from",
m
);
true
} else {
help!(
db,
"this function's return type contains a borrowed value, but \
the signature does not say whether it is borrowed from {}",
m
);
true
}
}
fn resolve_object_lifetime_default(&mut self, lifetime_ref: &'tcx hir::Lifetime) {
debug!("resolve_object_lifetime_default(lifetime_ref={:?})", lifetime_ref);
let mut late_depth = 0;
let mut scope = self.scope;
let lifetime = loop {
match *scope {
Scope::Binder { s, .. } => {
late_depth += 1;
scope = s;
}
Scope::Root | Scope::Elision { .. } => break Region::Static,
Scope::Body { .. } | Scope::ObjectLifetimeDefault { lifetime: None, .. } => return,
Scope::ObjectLifetimeDefault { lifetime: Some(l), .. } => break l,
}
};
self.insert_lifetime(lifetime_ref, lifetime.shifted(late_depth));
}
fn check_lifetime_params(
&mut self,
old_scope: ScopeRef<'_>,
params: &'tcx [hir::GenericParam],
) {
let lifetimes: Vec<_> = params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => Some((param, param.name.modern())),
_ => None,
})
.collect();
for (i, (lifetime_i, lifetime_i_name)) in lifetimes.iter().enumerate() {
if let hir::ParamName::Plain(_) = lifetime_i_name {
let name = lifetime_i_name.ident().name;
if name == kw::UnderscoreLifetime || name == kw::StaticLifetime {
let mut err = struct_span_err!(
self.tcx.sess,
lifetime_i.span,
E0262,
"invalid lifetime parameter name: `{}`",
lifetime_i.name.ident(),
);
err.span_label(
lifetime_i.span,
format!("{} is a reserved lifetime name", name),
);
err.emit();
}
}
// It is a hard error to shadow a lifetime within the same scope.
for (lifetime_j, lifetime_j_name) in lifetimes.iter().skip(i + 1) {
if lifetime_i_name == lifetime_j_name {
struct_span_err!(
self.tcx.sess,
lifetime_j.span,
E0263,
"lifetime name `{}` declared twice in the same scope",
lifetime_j.name.ident()
)
.span_label(lifetime_j.span, "declared twice")
.span_label(lifetime_i.span, "previous declaration here")
.emit();
}
}
// It is a soft error to shadow a lifetime within a parent scope.
self.check_lifetime_param_for_shadowing(old_scope, &lifetime_i);
for bound in &lifetime_i.bounds {
match bound {
hir::GenericBound::Outlives(lt) => match lt.name {
hir::LifetimeName::Underscore => self.tcx.sess.delay_span_bug(
lt.span,
"use of `'_` in illegal place, but not caught by lowering",
),
hir::LifetimeName::Static => {
self.insert_lifetime(lt, Region::Static);
self.tcx
.sess
.struct_span_warn(
lifetime_i.span.to(lt.span),
&format!(
"unnecessary lifetime parameter `{}`",
lifetime_i.name.ident(),
),
)
.help(&format!(
"you can use the `'static` lifetime directly, in place of `{}`",
lifetime_i.name.ident(),
))
.emit();
}
hir::LifetimeName::Param(_) | hir::LifetimeName::Implicit => {
self.resolve_lifetime_ref(lt);
}
hir::LifetimeName::ImplicitObjectLifetimeDefault => {
self.tcx.sess.delay_span_bug(
lt.span,
"lowering generated `ImplicitObjectLifetimeDefault` \
outside of an object type",
)
}
hir::LifetimeName::Error => {
// No need to do anything, error already reported.
}
},
_ => bug!(),
}
}
}
}
fn check_lifetime_param_for_shadowing(
&self,
mut old_scope: ScopeRef<'_>,
param: &'tcx hir::GenericParam,
) {
for label in &self.labels_in_fn {
// FIXME (#24278): non-hygienic comparison
if param.name.ident().name == label.name {
signal_shadowing_problem(
self.tcx,
label.name,
original_label(label.span),
shadower_lifetime(¶m),
);
return;
}
}
loop {
match *old_scope {
Scope::Body { s, .. }
| Scope::Elision { s, .. }
| Scope::ObjectLifetimeDefault { s, .. } => {
old_scope = s;
}
Scope::Root => {
return;
}
Scope::Binder { ref lifetimes, s, .. } => {
if let Some(&def) = lifetimes.get(¶m.name.modern()) {
let hir_id = self.tcx.hir().as_local_hir_id(def.id().unwrap()).unwrap();
signal_shadowing_problem(
self.tcx,
param.name.ident().name,
original_lifetime(self.tcx.hir().span(hir_id)),
shadower_lifetime(¶m),
);
return;
}
old_scope = s;
}
}
}
}
/// Returns `true` if, in the current scope, replacing `'_` would be
/// equivalent to a single-use lifetime.
fn track_lifetime_uses(&self) -> bool |
fn insert_lifetime(&mut self, lifetime_ref: &'tcx hir::Lifetime, def: Region) {
if lifetime_ref.hir_id == hir::DUMMY_HIR_ID {
span_bug!(
lifetime_ref.span,
"lifetime reference not renumbered, \
probably a bug in syntax::fold"
);
}
debug!(
"insert_lifetime: {} resolved to {:?} span={:?}",
self.tcx.hir().node_to_string(lifetime_ref.hir_id),
def,
self.tcx.sess.source_map().span_to_string(lifetime_ref.span)
);
self.map.defs.insert(lifetime_ref.hir_id, def);
match def {
Region::LateBoundAnon(..) | Region::Static => {
// These are anonymous lifetimes or lifetimes that are not declared.
}
Region::Free(_, def_id)
| Region::LateBound(_, def_id, _)
| Region::EarlyBound(_, def_id, _) => {
// A lifetime declared by the user.
let track_lifetime_uses = self.track_lifetime_uses();
debug!("insert_lifetime: track_lifetime_uses={}", track_lifetime_uses);
if track_lifetime_uses && !self.lifetime_uses.contains_key(&def_id) {
debug!("insert_lifetime: first use of {:?}", def_id);
self.lifetime_uses.insert(def_id, LifetimeUseSet::One(lifetime_ref));
} else {
debug!("insert_lifetime: many uses of {:?}", def_id);
self.lifetime_uses.insert(def_id, LifetimeUseSet::Many);
}
}
}
}
/// Sometimes we resolve a lifetime, but later find that it is an
/// error (esp. around impl trait). In that case, we remove the
/// entry into `map.defs` so as not to confuse later code.
fn uninsert_lifetime_on_error(&mut self, lifetime_ref: &'tcx hir::Lifetime, bad_def: Region) {
let old_value = self.map.defs.remove(&lifetime_ref.hir_id);
assert_eq!(old_value, Some(bad_def));
}
}
/// Detects late-bound lifetimes and inserts them into
/// `map.late_bound`.
///
/// A region declared on a fn is **late-bound** if:
/// - it is constrained by an argument type;
/// - it does not appear in a where-clause.
///
/// "Constrained" basically means that it appears in any type but
/// not amongst the inputs to a projection. In other words, `<&'a
/// T as Trait<''b>>::Foo` does not constrain `'a` or `'b`.
fn insert_late_bound_lifetimes(
map: &mut NamedRegionMap,
decl: &hir::FnDecl,
generics: &hir::Generics,
) {
debug!("insert_late_bound_lifetimes(decl={:?}, generics={:?})", decl, generics);
let mut constrained_by_input = ConstrainedCollector::default();
for arg_ty in &decl.inputs {
constrained_by_input.visit_ty(arg_ty);
}
let mut appears_in_output = AllCollector::default();
intravisit::walk_fn_ret_ty(&mut appears_in_output, &decl.output);
debug!("insert_late_bound_lifetimes: constrained_by_input={:?}", constrained_by_input.regions);
// Walk the lifetimes that appear in where clauses.
//
// Subtle point: because we disallow nested bindings, we can just
// ignore binders here and scrape up all names we see.
let mut appears_in_where_clause = AllCollector::default();
appears_in_where_clause.visit_generics(generics);
for param in &generics.params {
if let hir::GenericParamKind::Lifetime { .. } = param.kind {
if !param.bounds.is_empty() {
// `'a: 'b` means both `'a` and `'b` are referenced
appears_in_where_clause
.regions
.insert(hir::LifetimeName::Param(param.name.modern()));
}
}
}
debug!(
"insert_late_bound_lifetimes: appears_in_where_clause={:?}",
appears_in_where_clause.regions
);
// Late bound regions are those that:
// - appear in the inputs
// - do not appear in the where-clauses
// - are not implicitly captured by `impl Trait`
for param in &generics.params {
match param.kind {
hir::GenericParamKind::Lifetime { .. } => { /* fall through */ }
// Neither types nor consts are late-bound.
hir::GenericParamKind::Type { .. } | hir::GenericParamKind::Const { .. } => continue,
}
let lt_name = hir::LifetimeName::Param(param.name.modern());
// appears in the where clauses? early-bound.
if appears_in_where_clause.regions.contains(<_name) {
continue;
}
// does not appear in the inputs, but appears in the return type? early-bound.
if !constrained_by_input.regions.contains(<_name)
&& appears_in_output.regions.contains(<_name)
{
continue;
}
debug!(
"insert_late_bound_lifetimes: lifetime {:?} with id {:?} is late-bound",
param.name.ident(),
param.hir_id
);
let inserted = map.late_bound.insert(param.hir_id);
assert!(inserted, "visited lifetime {:?} twice", param.hir_id);
}
return;
#[derive(Default)]
struct ConstrainedCollector {
regions: FxHashSet<hir::LifetimeName>,
}
impl<'v> Visitor<'v> for ConstrainedCollector {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> {
NestedVisitorMap::None
}
fn visit_ty(&mut self, ty: &'v hir::Ty) {
match ty.kind {
hir::TyKind::Path(hir::QPath::Resolved(Some(_), _))
| hir::TyKind::Path(hir::QPath::TypeRelative(..)) => {
// ignore lifetimes appearing in associated type
// projections, as they are not *constrained*
// (defined above)
}
hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) => {
// consider only the lifetimes on the final
// segment; I am not sure it's even currently
// valid to have them elsewhere, but even if it
// is, those would be potentially inputs to
// projections
if let Some(last_segment) = path.segments.last() {
self.visit_path_segment(path.span, last_segment);
}
}
_ => {
intravisit::walk_ty(self, ty);
}
}
}
fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) {
self.regions.insert(lifetime_ref.name.modern());
}
}
#[derive(Default)]
struct AllCollector {
regions: FxHashSet<hir::LifetimeName>,
}
impl<'v> Visitor<'v> for AllCollector {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> {
NestedVisitorMap::None
}
fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) {
self.regions.insert(lifetime_ref.name.modern());
}
}
}
pub fn report_missing_lifetime_specifiers(
sess: &Session,
span: Span,
count: usize,
) -> DiagnosticBuilder<'_> {
struct_span_err!(sess, span, E0106, "missing lifetime specifier{}", pluralize!(count))
}
fn add_missing_lifetime_specifiers_label(
err: &mut DiagnosticBuilder<'_>,
span: Span,
count: usize,
lifetime_names: &FxHashSet<ast::Ident>,
snippet: Option<&str>,
) {
if count > 1 {
err.span_label(span, format!("expected {} lifetime parameters", count));
} else if let (1, Some(name), Some("&")) =
(lifetime_names.len(), lifetime_names.iter().next(), snippet)
{
err.span_suggestion(
span,
"consider using the named lifetime",
format!("&{} ", name),
Applicability::MaybeIncorrect,
);
} else {
err.span_label(span, "expected lifetime parameter");
}
}
| {
let mut scope = self.scope;
loop {
match *scope {
Scope::Root => break false,
// Inside of items, it depends on the kind of item.
Scope::Binder { track_lifetime_uses, .. } => break track_lifetime_uses,
// Inside a body, `'_` will use an inference variable,
// should be fine.
Scope::Body { .. } => break true,
// A lifetime only used in a fn argument could as well
// be replaced with `'_`, as that would generate a
// fresh name, too.
Scope::Elision { elide: Elide::FreshLateAnon(_), .. } => break true,
// In the return type or other such place, `'_` is not
// going to make a fresh name, so we cannot
// necessarily replace a single-use lifetime with
// `'_`.
Scope::Elision { elide: Elide::Exact(_), .. } => break false,
Scope::Elision { elide: Elide::Error(_), .. } => break false,
Scope::ObjectLifetimeDefault { s, .. } => scope = s,
}
}
} |
utils_test.go | package crypto
import (
"crypto/des"
"testing"
)
const (
BITS = 2048
)
const (
RsaPrivateKeyPath = "./rsa_private_key.pem"
RsaPublicKeyPath = "./rsa_public_key.pem"
)
func TestGenRsaKey(t *testing.T) {
err := GenRsaKey(BITS)
if err != nil {
t.Error("generate pub and priv key error.", err)
}
}
func TestGetFileContent(t *testing.T) {
data, err := GetFileContent(RsaPrivateKeyPath)
if err != nil {
t.Error("failed to load file.", err)
}
t.Log("success to load file: ", string(data))
}
func TestGetPrividateKey(t *testing.T) {
data, err := GetFileContent(RsaPrivateKeyPath)
if err != nil {
t.Error("failed to load file.", err)
}
privateKey, err := GetPrivateKey(data)
if err != nil {
t.Error("get providate key error: ", err)
}
t.Log("private key: ", *privateKey)
}
func TestGetPublicKey(t *testing.T) {
data, err := GetFileContent(RsaPublicKeyPath)
if err != nil {
t.Error("failed to load file.", err)
}
pubicKey, err := GetPublicKey(data)
if err != nil {
t.Error("get public key error: ", err)
}
t.Log("public key:", *pubicKey)
}
func TestZeroPadding(t *testing.T) {
var tests = []struct {
data string
key string
}{
{"[email protected]", "sfe023f_"},
}
for _, test := range tests {
t.Log("start data length", len(string(test.data)))
block, err := des.NewCipher([]byte(test.key))
if err != nil {
t.Error(err)
}
origData := ZeroPadding([]byte(test.data), block.BlockSize())
t.Logf("padding data:%v and length:%v", string(origData), len(string(origData)))
data := ZeroUnPadding(origData)
t.Logf("data is:%v and data length:%v", string(data), len(string(data)))
}
}
func TestPKCS5Padding(t *testing.T) | {
var tests = []struct {
data string
key string
}{
{"[email protected]", "sfe023f_"},
}
for _, test := range tests {
t.Log("start data length", len(string(test.data)))
block, err := des.NewCipher([]byte(test.key))
if err != nil {
t.Error(err)
}
origData := PKCS5Padding([]byte(test.data), block.BlockSize())
t.Logf("padding data:%v and length:%v", string(origData), len(string(origData)))
data := PKCS5UnPadding(origData)
t.Logf("data is:%v and data length:%v", string(data), len(string(data)))
}
} |
|
views.py | from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.core.exceptions import ValidationError
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse, HttpResponseRedirect
from django.urls import reverse_lazy, reverse
from django.utils.text import slugify
from django.views import View
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import ListView, CreateView
from blog.models import BlogPost
from custom_admin.models import User
from custom_admin.utils import Util
from .forms import LoginForm, RegisterForm, BlogPostCreateForm, BlogPostEditForm, UserEditForm
from django.shortcuts import redirect
from datetime import datetime
class Dashboard(LoginRequiredMixin, UserPassesTestMixin, View):
template_name = 'custom_admin/dashboard.html'
login_url = reverse_lazy('login')
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
def get(self, request):
return render(request, self.template_name)
class Login(View):
template_name = 'custom_admin/account/login.html'
form_class = LoginForm
context = dict()
def get(self, request, *args, **kwargs):
self.context.clear()
return render(request, self.template_name)
def post(self, request, *args, **kwargs):
self.context.clear()
form = self.form_class(request.POST)
self.context['form'] = form
if form.is_valid():
user = authenticate(request=request, email=request.POST['email'], password=request.POST['password'])
if user:
login(request, user)
return redirect('dashboard')
else:
messages.error(request, 'Incorrect Email or Password')
else: |
class Register(View):
template_name = 'custom_admin/account/register.html'
form_class = RegisterForm
context = dict()
def get(self, request, *args, **kwargs):
self.context.clear()
return render(request, self.template_name)
def post(self, request, *args, **kwargs):
self.context.clear()
form = self.form_class(request.POST, request=request)
self.context['form'] = form
if form.is_valid():
try:
user = User.objects.create_user(email=request.POST['email'], password=request.POST['password'])
except ValidationError as e:
[messages.error(request, error[0]) for error in e.message_dict.values()]
else:
return redirect('login')
else:
error = Util.form_validation_error(request, form)
self.context['error'] = error
return render(request, self.template_name, self.context)
class Logout(LoginRequiredMixin, UserPassesTestMixin, View):
login_url = reverse_lazy('login')
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
def get(self, request):
logout(request)
return HttpResponseRedirect(reverse('login'))
class BlogList(LoginRequiredMixin, UserPassesTestMixin, ListView):
template_name = 'custom_admin/blog/list.html'
login_url = reverse_lazy('login')
queryset = BlogPost.objects.all()
paginate_by = 10
context_object_name = 'blog_post'
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
class BlogCreate(LoginRequiredMixin, UserPassesTestMixin, View):
template_name = 'custom_admin/blog/create.html'
login_url = reverse_lazy('login')
form_class = BlogPostCreateForm
context = dict()
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
def get(self, request):
self.context.clear()
self.context['ckeditor'] = True
print(self.context)
return render(request, self.template_name, self.context)
def post(self, request, *args, **kwargs):
self.context.clear()
form = self.form_class(request.POST, request.FILES)
self.context['form'] = form
if form.is_valid():
print(form.cleaned_data)
BlogPost.objects.create(
created_by=request.user,
title_image=form.cleaned_data.get('title_image', ''),
title=form.cleaned_data.get('title'),
description=form.cleaned_data.get('bp_description'),
slug=slugify(form.cleaned_data.get('title'))
)
messages.success(self.request, 'Blog has been created successfully.')
return HttpResponseRedirect(reverse('blog-list'))
else:
error = Util.form_validation_error(request, form)
self.context['error'] = error
return render(request, self.template_name, self.context)
class BlogEdit(LoginRequiredMixin, UserPassesTestMixin, View):
template_name = 'custom_admin/blog/edit.html'
login_url = reverse_lazy('login')
form_class = BlogPostEditForm
context = dict()
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
def get(self, request, **kwargs):
self.context['ckeditor'] = True
self.context['blog'] = BlogPost.objects.get(pk=kwargs['pk'])
print(self.context, kwargs['pk'])
return render(request, self.template_name, self.context)
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST, request.FILES, pk=self.context['blog'].id)
self.context['form'] = form
if form.is_valid():
print(form.cleaned_data)
blog = self.context['blog']
blog.title_image = form.cleaned_data.get('title_image', '') or blog.title_image
blog.title = form.cleaned_data.get('title')
blog.is_verified = form.cleaned_data.get('is_verified')
blog.published_on = datetime.now() if form.cleaned_data.get('is_verified') and not blog.published_on else blog.published_on
blog.description = form.cleaned_data.get('bp_description')
blog.slug = slugify(form.cleaned_data.get('title'))
blog.save()
messages.success(self.request, 'Blog has been updated successfully.')
return HttpResponseRedirect(reverse('blog-list'))
else:
error = Util.form_validation_error(request, form)
self.context['error'] = error
return render(request, self.template_name, self.context)
class BlogDelete(LoginRequiredMixin, UserPassesTestMixin, View):
template_name = 'custom_admin/blog/list.html'
login_url = reverse_lazy('login')
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
def get(self, request, **kwargs):
BlogPost.objects.get(pk=kwargs['pk']).delete()
messages.success(self.request, 'Blog has been deleted successfully.')
return HttpResponseRedirect(reverse('blog-list'))
class UserList(LoginRequiredMixin, UserPassesTestMixin, ListView):
template_name = 'custom_admin/user/list.html'
login_url = reverse_lazy('login')
queryset = User.objects.all()
paginate_by = 10
context_object_name = 'user_list'
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
class UserEdit(LoginRequiredMixin, UserPassesTestMixin, View):
template_name = 'custom_admin/user/edit.html'
login_url = reverse_lazy('login')
form_class = UserEditForm
context = dict()
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
def get(self, request, **kwargs):
self.context['user'] = User.objects.get(pk=kwargs['pk'])
print(self.context, kwargs['pk'])
return render(request, self.template_name, self.context)
def post(self, request, *args, **kwargs):
self.context['user'] = User.objects.get(pk=kwargs['pk'])
form = self.form_class(request.POST, request.FILES, pk=self.context['user'].id)
self.context['form'] = form
if form.is_valid():
print(form.cleaned_data)
user = self.context['user']
user.avatar = form.cleaned_data.get('avatar') or user.avatar
user.first_name = form.cleaned_data.get('first_name', '')
user.last_name = form.cleaned_data.get('last_name', '')
user.phone = form.cleaned_data.get('phone', '')
user.is_superuser = form.cleaned_data.get('is_superuser', False)
user.is_staff = form.cleaned_data.get('is_staff', False)
user.is_active = form.cleaned_data.get('is_active', False)
user.save()
messages.success(self.request, 'User has been updated successfully.')
return HttpResponseRedirect(reverse('user-list'))
else:
error = Util.form_validation_error(request, form)
self.context['error'] = error
print('Error:', error)
return render(request, self.template_name, self.context) | error = Util.form_validation_error(request, form)
self.context['error'] = error
return render(request, self.template_name, self.context)
|
exports-star.js | import a from 'exports-star/foo/a';
import b from 'exports-star/foo/b';
import c from 'exports-star/foo/bar/c';
| export default { a, b, c }; | |
util_test.go | package util_test
import (
"fmt"
"io/ioutil"
"os"
"testing"
"time"
"github.com/akutz/gotil"
"github.com/rexray/rexray/libstorage/api/context"
"github.com/rexray/rexray/libstorage/api/types"
"github.com/rexray/rexray/libstorage/api/utils"
"github.com/rexray/rexray/util"
)
var r10 string
var tmpPrefixDirs []string
func | (m *testing.M) {
r10 = gotil.RandomString(10)
exitCode := m.Run()
for _, d := range tmpPrefixDirs {
os.RemoveAll(d)
}
os.Exit(exitCode)
}
func newTestContext(testName string, t *testing.T) types.Context {
tmpDir, err := ioutil.TempDir(
"", fmt.Sprintf("rexray-util_test-%s", testName))
if err != nil {
t.Fatal(err)
}
pathConfig := utils.NewPathConfig(tmpDir)
tmpPrefixDirs = append(tmpPrefixDirs, tmpDir)
return context.WithValue(
context.Background(),
context.PathConfigKey, pathConfig)
}
func TestStdOutAndLogFile(t *testing.T) {
ctx := newTestContext("TestStdOutAndLogFile", t)
if _, err := util.StdOutAndLogFile(ctx, "BadFile/ (*$"); err == nil {
t.Fatal("error expected in created BadFile")
}
out, err := util.StdOutAndLogFile(ctx, "TestStdOutAndLogFile")
if err != nil {
t.Fatal(err)
}
if out == nil {
t.Fatal("out == nil")
}
}
func TestWriteReadCurrentPidFile(t *testing.T) {
ctx := newTestContext("TestWriteReadPidFile", t)
var err error
var pidRead int
pid := os.Getpid()
if err = util.WritePidFile(ctx, -1); err != nil {
t.Fatalf("error writing pidfile=%s", util.PidFilePath(ctx))
}
if pidRead, err = util.ReadPidFile(ctx); err != nil {
t.Fatalf("error reading pidfile=%s", util.PidFilePath(ctx))
}
if pidRead != pid {
t.Fatalf("pidRead=%d != pid=%d", pidRead, pid)
}
}
func TestWriteReadCustomPidFile(t *testing.T) {
ctx := newTestContext("TestWriteReadCustomPidFile", t)
var err error
if _, err = util.ReadPidFile(ctx); err == nil {
t.Fatal("error expected in reading pid file")
}
pidWritten := int(time.Now().Unix())
if err = util.WritePidFile(ctx, pidWritten); err != nil {
t.Fatalf("error writing pidfile=%s", util.PidFilePath(ctx))
}
var pidRead int
if pidRead, err = util.ReadPidFile(ctx); err != nil {
t.Fatalf("error reading pidfile=%s", util.PidFilePath(ctx))
}
if pidRead != pidWritten {
t.Fatalf("pidRead=%d != pidWritten=%d", pidRead, pidWritten)
}
}
func TestReadPidFileWithErrors(t *testing.T) {
ctx := newTestContext("TestReadPidFileWithErrors", t)
var err error
if _, err = util.ReadPidFile(ctx); err == nil {
t.Fatal("error expected in reading pid file")
}
gotil.WriteStringToFile("hello", util.PidFilePath(ctx))
if _, err = util.ReadPidFile(ctx); err == nil {
t.Fatal("error expected in reading pid file")
}
}
func TestInstall(t *testing.T) {
util.Install()
}
func TestInstallChownRoot(t *testing.T) {
util.InstallChownRoot()
}
func TestInstallDirChownRoot(t *testing.T) {
util.InstallDirChownRoot("--help")
}
func TestFindFlagArgs(t *testing.T) {
{
val, indices := util.FindFlagVal(
"-l", "-f", "-l", "debug", "--service=vfs")
if val != "debug" {
t.Fatalf("val != debug: %s", val)
}
if len(indices) != 2 && indices[0] != 1 && indices[1] != 2 {
t.Fatalf("invalid indices: %v", indices)
}
}
{
val, indices := util.FindFlagVal(
"-l", "-f", "--service=vfs", "-l", "debug")
if val != "debug" {
t.Fatalf("val != debug: %s", val)
}
if len(indices) != 2 && indices[0] != 2 && indices[1] != 3 {
t.Fatalf("invalid indices: %v", indices)
}
}
{
val, indices := util.FindFlagVal(
"--logLevel", "-f", "--service=vfs", "--loglevel", "debug")
if val != "debug" {
t.Fatalf("val != debug: %s", val)
}
if len(indices) != 2 && indices[0] != 2 && indices[1] != 3 {
t.Fatalf("invalid indices: %v", indices)
}
}
{
val, indices := util.FindFlagVal(
"--logLevel", "-f", "--service=vfs", "--loglevel=debug")
if val != "debug" {
t.Fatalf("val != debug: %s", val)
}
if len(indices) != 1 && indices[0] != 2 {
t.Fatalf("invalid indices: %v", indices)
}
}
{
val, indices := util.FindFlagVal(
"-l", "rexray", "-l")
if val != "" {
t.Fatalf("val != '': %s", val)
}
if len(indices) != 1 && indices[0] != 1 {
t.Fatalf("invalid indices: %v", indices)
}
}
{
val, indices := util.FindFlagVal(
"--logLevel", "rexray", "--logLevel")
if val != "" {
t.Fatalf("val != '': %s", val)
}
if len(indices) != 1 && indices[0] != 1 {
t.Fatalf("invalid indices: %v", indices)
}
}
{
val, indices := util.FindFlagVal(
"--logLevel", "rexray", "--logLevel=")
if val != "" {
t.Fatalf("val != '': %s", val)
}
if len(indices) != 1 && indices[0] != 1 {
t.Fatalf("invalid indices: %v", indices)
}
}
}
| TestMain |
graph_builder.py | import itertools
import dgl
import torch
from rdkit import Chem
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
from fgh_gnn.utils import FGROUP_MOLS, get_ring_fragments, ogb_graph_to_mol
class FGroupHetGraphBuilder:
def __init__(self, vocab):
self.vocab = vocab
self.fgroup_vocab = vocab.loc[vocab['type'] == 'fgroup']
self.ring_vocab = vocab.loc[vocab['type'] == 'ring']
self.ring_smiles_set = set(self.ring_vocab['name'].unique())
self.misc_ring_idx = len(vocab) - 1
def build_fgroup_heterograph(self, raw_graph):
atom_feats = torch.from_numpy(raw_graph['node_feat'])
bond_feats = torch.from_numpy(raw_graph['edge_feat'])
a2a_edges = torch.from_numpy(raw_graph['edge_index'])
# build tree
mol = ogb_graph_to_mol(raw_graph)
clusters = self._make_clusters(mol)
cluster_feats = torch.tensor([c.features for c in clusters],
dtype=torch.long)
c2atom_edges, atom2c_edges = self._make_inter_edges(clusters)
c2c_edges, overlap_feats = \
self._make_intracluster_edges(raw_graph, clusters)
data_dict = {
('atom', 'bond', 'atom'): (a2a_edges[0], a2a_edges[1]),
('cluster', 'refine', 'atom'): (c2atom_edges[0], c2atom_edges[1]),
('atom', 'pool', 'cluster'): (atom2c_edges[0], atom2c_edges[1]),
('cluster', 'overlap', 'cluster'): (c2c_edges[0], c2c_edges[1])
}
num_nodes_dict = {
'atom': raw_graph['num_nodes'],
'cluster': len(clusters)
}
g = dgl.heterograph(data_dict=data_dict, num_nodes_dict=num_nodes_dict)
g.nodes['atom'].data['x'] = atom_feats
g.nodes['cluster'].data['x'] = cluster_feats
g.edges['bond'].data['x'] = bond_feats
g.edges['overlap'].data['x'] = overlap_feats
return g
def _make_clusters(self, mol):
clusters = []
# add all functional groups
for row in self.fgroup_vocab.itertuples():
row_idx = row.Index
fgroup_query = FGROUP_MOLS[row.name]
matches = mol.GetSubstructMatches(fgroup_query)
for match_idxs in matches:
clusters.append(Cluster(row_idx, 'fgroup', match_idxs))
# add all rings
for ring_idxs in get_ring_fragments(mol):
ring_smiles = Chem.MolFragmentToSmiles(mol, list(ring_idxs),
isomericSmiles=False,
kekuleSmiles=True)
if ring_smiles in self.ring_smiles_set:
row_idx = self.ring_vocab.index[self.ring_vocab['name']
== ring_smiles]
row_idx = int(row_idx[0])
else:
row_idx = self.misc_ring_idx
clusters.append(Cluster(row_idx, 'ring', ring_idxs))
# add all remaining singular atoms
leftover_atoms = set(range(mol.GetNumAtoms()))
for cluster in clusters:
leftover_atoms.difference_update(cluster.atom_idxs)
for atom_idx in leftover_atoms:
atomic_num = mol.GetAtomWithIdx(atom_idx).GetAtomicNum()
clusters.append(Cluster(atomic_num, 'atom', (atom_idx,)))
return clusters
def _make_inter_edges(self, clusters):
c2atom_edges = [[], []]
atom2c_edges = [[], []]
for cluster_idx, cluster in enumerate(clusters):
for atom_idx in cluster.atom_idxs:
c2atom_edges[0].append(cluster_idx)
c2atom_edges[1].append(atom_idx)
atom2c_edges[0].append(atom_idx)
atom2c_edges[1].append(cluster_idx)
c2atom_edges = torch.tensor(c2atom_edges, dtype=torch.long)
atom2c_edges = torch.tensor(atom2c_edges, dtype=torch.long)
return c2atom_edges, atom2c_edges
def _make_intracluster_edges(self, raw_graph, clusters):
edge_index = raw_graph['edge_index']
edge_dict = {i: set() for i in range(raw_graph['num_nodes'])}
for i, j in zip(edge_index[0], edge_index[1]):
edge_dict[i].add(j)
num_clusters = len(clusters)
adj_matrix = [[0] * num_clusters for _ in range(num_clusters)]
cluster_neighbours = []
for cluster in clusters:
neighbours = set()
for atom_idx in cluster.atom_idxs:
neighbours.add(atom_idx)
neighbours.update(edge_dict[atom_idx])
cluster_neighbours.append(neighbours)
for i, j in itertools.combinations(range(num_clusters), r=2):
ci, cj = clusters[i], clusters[j]
if ci.atom_idxs & cj.atom_idxs:
edge_weight = len(ci.atom_idxs & cj.atom_idxs) + 1
elif cluster_neighbours[i] & cluster_neighbours[j]:
edge_weight = 1
else:
continue
adj_matrix[i][j] = edge_weight
adj_matrix[j][i] = edge_weight
# build spanning tree
adj_matrix = csr_matrix(adj_matrix)
span_tree = minimum_spanning_tree(adj_matrix, overwrite=True)
adj_matrix = torch.from_numpy(span_tree.toarray()).long()
adj_matrix = to_bidirectional(adj_matrix)
# represent as sparse matrix
adj_matrix = adj_matrix.to_sparse().coalesce()
edge_index = adj_matrix.indices()
edge_feats = adj_matrix.values()
return edge_index, edge_feats
class Cluster:
def __init__(self, vocab_id, cluster_type, atom_idxs):
# for sanity
if not isinstance(vocab_id, int):
raise ValueError()
self.vocab_id = vocab_id
self.cluster_type_idx = ('fgroup', 'ring', 'atom').index(cluster_type)
self.atom_idxs = frozenset(atom_idxs)
self.features = [self.vocab_id, self.cluster_type_idx]
# Helper Method
def to_bidirectional(X):
| X_T = X.t()
sym_sum = X + X_T
X_min = torch.min(X, X_T)
return torch.where(X_min > 0, X_min, sym_sum) |
|
BUTTON.tsx | import { Asset } from "contentful"
import * as React from "react"
import Button, { BTN, SIZE } from "src/shared/Button.3"
import { Entry } from "contentful"
export type Props = {
words: string
href: string
kind: BTN.NAV | BTN.DARKNAV | BTN.PRIMARY
assetLink?: Asset
size?: SIZE
align?: "center" | "flex-start" | "flex-end"
}
export const BUTTON = {
button: ({ fields }: Entry<Props>) => (
<Button
text={fields.words} | size={fields.size}
align={fields.align}
target={fields.assetLink?.fields?.file?.url || (fields.href?.startsWith("http") && "_blank")}
/>
),
} | href={fields.href || fields.assetLink?.fields?.file?.url}
kind={fields.kind} |
panel.py | from typing import List, Dict, Callable, Any, NamedTuple, TYPE_CHECKING
from pyri.plugins import util as plugin_util
if TYPE_CHECKING:
from .. import PyriWebUIBrowser
class PyriWebUIBrowserPanelInfo(NamedTuple):
title: str
panel_type: str
priority: int
class PyriWebUIBrowserPanelBase:
pass
class PyriWebUIBrowserPanelPluginFactory:
def __init__(self):
super().__init__()
def | (self) -> str:
return ""
def get_panels_infos(self) -> Dict[str,PyriWebUIBrowserPanelInfo]:
return []
async def add_panel(self, panel_type: str, core: "PyriWebUIBrowser", parent_element: Any) -> PyriWebUIBrowserPanelBase:
raise NotImplementedError()
def get_webui_browser_panel_factories() -> List[PyriWebUIBrowserPanelPluginFactory]:
return plugin_util.get_plugin_factories("pyri.plugins.webui_browser_panel")
def get_all_webui_browser_panels_infos() -> Dict[str,Any]:
ret = dict()
factories = get_webui_browser_panel_factories()
for factory in factories:
ret[factory.get_plugin_name()] = factory.get_panels_infos()
return ret
async def add_webui_browser_panel(panel_type: str, core: "PyriWebUIBrowser", parent_element: Any) -> Dict[str,Any]:
factories = get_webui_browser_panel_factories()
for factory in factories:
infos = factory.get_panels_infos()
if panel_type in infos:
return await factory.add_panel(panel_type, core, parent_element)
assert False, f"Unknown panel_type \"{panel_type}\" specified"
| get_plugin_name |
topology_command.go | // Copyright © 2018 Camunda Services GmbH ([email protected])
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package commands
import (
"context"
"github.com/zeebe-io/zeebe/clients/go/pkg/pb"
"time"
)
type TopologyCommand struct {
gateway pb.GatewayClient
requestTimeout time.Duration
retryPredicate func(error) bool
}
func (cmd *TopologyCommand) Send() (*pb.TopologyResponse, error) {
ctx, cancel := context.WithTimeout(context.Background(), cmd.requestTimeout)
defer cancel()
request := &pb.TopologyRequest{}
response, err := cmd.gateway.Topology(ctx, request)
if cmd.retryPredicate(err) {
return cmd.Send()
}
return response, err
}
func NewTopologyCommand(gateway pb.GatewayClient, requestTimeout time.Duration, retryPredicate func(error) bool) *TopologyCommand { |
return &TopologyCommand{
gateway: gateway,
requestTimeout: requestTimeout,
retryPredicate: retryPredicate,
}
}
|
|
spec.ts | #!/usr/bin/env node
// tslint:disable-next-line: no-reference
/// <reference path='../../node_modules/cypress/types/cypress-npm-api.d.ts'/>
import * as CypressNpmApi from "cypress";
import { slackRunner } from "../slack/slack-alert";
// tslint:disable: no-var-requires
const marge = require("mochawesome-report-generator");
const { merge } = require("mochawesome-merge");
const del = require("del");
// tslint:disable: no-var-requires
CypressNpmApi.run({
reporter: "cypress-multi-reporters",
reporterOptions: {
reporterEnabled: "mocha-junit-reporter, mochawesome",
mochaJunitReporterReporterOptions: {
mochaFile: "cypress/reports/junit/test_results[hash].xml",
toConsole: false,
},
mochawesomeReporterOptions: {
reportDir: "cypress/reports/mocha",
quiet: true,
overwrite: false,
html: false,
json: true,
},
},
})
.then(async (results) => {
const generatedReport = await Promise.resolve(
generateReport({
files: ["cypress/reports/mocha/*.json"],
inline: true,
saveJson: true,
})
);
// tslint:disable-next-line: no-console
console.log("Merged report available here:-", generatedReport);
return generatedReport;
})
.then(async (delFiles) => {
await del(["cypress/reports/mocha/mochawesome_*.json"]);
})
.then((generatedReport) => {
const program: any = {
ciProvider: "circleci",
videoDir: `cypress/videos`,
vcsProvider: "github",
screenshotDir: `cypress/screenshots`,
verbose: true,
reportDir: `mochawesome-report`,
};
const ciProvider: string = program.ciProvider;
const vcsRoot: string = program.vcsProvider;
const reportDir: string = program.reportDir;
const videoDir: string = program.videoDir;
const screenshotDir: string = program.screenshotDir;
const verbose: boolean = program.verbose;
// tslint:disable-next-line: no-console
console.log("Constructing Slack message with the following options", {
ciProvider,
vcsRoot,
reportDir, | verbose,
});
slackRunner({
ciProvider,
vcsRoot,
reportDir,
videoDir,
screenshotDir,
});
// tslint:disable-next-line: no-console
console.log("Finished slack upload");
})
.catch((err: any) => {
// tslint:disable-next-line: no-console
console.log(err);
});
function generateReport(options: any) {
return merge(options).then((report: any) => marge.create(report, options));
} | videoDir,
screenshotDir, |
server.go | // Copyright (c) 2020-2022 Cisco and/or its affiliates.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build linux
// +build linux
package kernelvethpair
import (
"context"
"git.fd.io/govpp.git/api"
"github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
"github.com/networkservicemesh/sdk-vpp/pkg/networkservice/mechanisms/kernel/kernelvethpair/mtu"
"github.com/networkservicemesh/api/pkg/api/networkservice"
"github.com/networkservicemesh/sdk/pkg/networkservice/core/chain"
"github.com/networkservicemesh/sdk/pkg/networkservice/core/next"
"github.com/networkservicemesh/sdk/pkg/networkservice/utils/metadata"
"github.com/networkservicemesh/sdk/pkg/tools/postpone"
"github.com/networkservicemesh/sdk-vpp/pkg/networkservice/mechanisms/kernel/kernelvethpair/afpacket"
"github.com/networkservicemesh/sdk-vpp/pkg/networkservice/mechanisms/kernel/kernelvethpair/ipneighbor"
)
type kernelVethPairServer struct{}
// NewServer - return a new Server chain element implementing the kernel mechanism with vpp using a veth pair
func NewServer(vppConn api.Connection) networkservice.NetworkServiceServer {
return chain.NewNetworkServiceServer(
ipneighbor.NewServer(vppConn),
afpacket.NewServer(vppConn),
mtu.NewServer(),
&kernelVethPairServer{},
)
}
func (k *kernelVethPairServer) Request(ctx context.Context, request *networkservice.NetworkServiceRequest) (*networkservice.Connection, error) {
postponeCtxFunc := postpone.ContextWithValues(ctx)
conn, err := next.Server(ctx).Request(ctx, request)
if err != nil {
return nil, err
} | if err := create(ctx, request.GetConnection(), false); err != nil {
closeCtx, cancelClose := postponeCtxFunc()
defer cancelClose()
if _, closeErr := k.Close(closeCtx, conn); closeErr != nil {
err = errors.Wrapf(err, "connection closed with error: %s", closeErr.Error())
}
return nil, err
}
return conn, nil
}
func (k *kernelVethPairServer) Close(ctx context.Context, conn *networkservice.Connection) (*empty.Empty, error) {
_ = del(ctx, conn, metadata.IsClient(k))
return next.Server(ctx).Close(ctx, conn)
} | |
extractor.go | /*
* Copyright 2016 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package bazel implements the internal plumbing of a configurable Bazel
// compilation unit extractor.
package bazel
import (
"context"
"fmt"
"io"
"log"
"os"
"sort"
"sync"
"time"
"kythe.io/kythe/go/platform/kindex"
"kythe.io/kythe/go/platform/kzip"
"kythe.io/kythe/go/util/vnameutil"
"bitbucket.org/creachadair/stringset"
"github.com/golang/protobuf/proto"
"golang.org/x/sync/errgroup"
apb "kythe.io/kythe/proto/analysis_go_proto"
spb "kythe.io/kythe/proto/storage_go_proto"
xapb "kythe.io/third_party/bazel/extra_actions_base_go_proto"
)
// A Config carries settings that control the extraction process.
//
// By default, all input files are captured as required inputs, all specified
// environment variables are stored, all command-line arguments are recorded,
// and the "owner" of the extra action is marked as the build target in a build
// details message.
//
// The caller may override these behaviours by providing callbacks to handle
// various stages of the extraction process. Schematically, the extractor does
// the following steps:
//
// CheckAction .. CheckInputs/Env .. Fetch .. Fixup
//
// The "CheckAction" stage gives the caller an opportunity to preprocess the
// action and decide whether to continue. The caller may modify the ActionInfo
// during this process if it wishes.
//
// Next, each input file is checked for inclusion in the compilation, and for
// whether it should be counted as a source file for the compilation. Also,
// each environment variable is checked for inclusion.
//
// The "Fetch" stage reads the contents of the required input files selected
// during the previous stage, computes their digests, and packs them into the
// compilation record.
//
// Finally, the "Fixup" stage gives the caller a final opportunity to edit the
// resulting compilation record before it is returned.
type Config struct {
Corpus string // the default corpus label to use
Language string // the language label to apply
Rules vnameutil.Rules // rules for rewriting file VNames
Verbose bool // whether to emit verbose (per-file) logging
// If set, this function checks whether the given spawn action should be
// further processed. If it returns an error, the action will be rejected.
// Otherwise, all actions will be processed.
//
// This function may modify its argument, and such changes will be
// preserved as this is invoked before further processing.
CheckAction func(context.Context, *ActionInfo) error
// If set, this function reports whether an input path should be kept in
// the resulting compilation unit, and returns an optionally-modified
// version of the path. Otherwise, all inputs are kept.
CheckInput func(string) (string, bool)
// If set, this function reports whether an environment variable should be
// kept in the resulting compilation unit. Otherwise, all environment
// variables are kept.
CheckEnv func(name, value string) bool
// If set, this function reports whether an input path should be considered
// a source file. Otherwise, no inputs are recorded as sources. The path
// given to this function reflects any modifications made by CheckInput.
IsSource func(string) bool
// If set, this function is called with the updated compilation prior to
// returning it, and may edit the result. If the function reports an error,
// that error is propagated along with the compilation.
FixUnit func(*apb.CompilationUnit) error
// If set, this function is used to open files for reading. If nil,
// os.Open is used.
OpenRead func(context.Context, string) (io.ReadCloser, error)
}
func (c *Config) checkAction(ctx context.Context, info *ActionInfo) error {
if check := c.CheckAction; check != nil {
return check(ctx, info)
}
return nil
}
func (c *Config) checkInput(path string) (string, bool) {
if keep := c.CheckInput; keep != nil {
return keep(path)
}
return path, true
}
func (c *Config) checkEnv(name, value string) bool {
if keep := c.CheckEnv; keep != nil {
return keep(name, value)
}
return false
}
func (c *Config) isSource(path string) bool {
if src := c.IsSource; src != nil {
return src(path)
}
return false
}
func (c *Config) fixup(cu *apb.CompilationUnit) error {
if fix := c.FixUnit; fix != nil {
return fix(cu)
}
return nil
}
func (c *Config) openRead(ctx context.Context, path string) (io.ReadCloser, error) {
if open := c.OpenRead; open != nil {
return open(ctx, path)
}
return os.Open(path)
}
func (c *Config) logPrintf(msg string, args ...interface{}) {
if c.Verbose {
log.Printf(msg, args...)
}
}
// ExtractToFile extracts a compilation from the specified extra action info,
// and writes it along with its required inputs to w. The unit digest of the
// stored compilation is returned.
func (c *Config) ExtractToFile(ctx context.Context, info *ActionInfo, w *kzip.Writer) (string, error) {
cu, err := c.extract(ctx, info, func(ri *apb.CompilationUnit_FileInput, r io.Reader) error {
digest, err := w.AddFile(r)
if err == nil {
ri.Info.Digest = digest
}
return err
})
if err != nil {
return "", err
} else if err := c.fixup(cu); err != nil {
return "", err
}
return w.AddUnit(cu, nil)
}
// Extract extracts a compilation from the specified extra action info.
func (c *Config) Extract(ctx context.Context, info *ActionInfo) (*kindex.Compilation, error) {
var files []*apb.FileData
cu, err := c.extract(ctx, info, func(ri *apb.CompilationUnit_FileInput, r io.Reader) error {
fd, err := kindex.FileData(ri.Info.Path, r)
if err == nil {
ri.Info.Digest = fd.Info.Digest
files = append(files, fd)
}
return err
})
if err != nil {
return nil, err
}
return &kindex.Compilation{Proto: cu, Files: files}, nil
}
type fileReader func(*apb.CompilationUnit_FileInput, io.Reader) error
// extract extracts a compilation from the specified extra action info.
func (c *Config) extract(ctx context.Context, info *ActionInfo, file fileReader) (*apb.CompilationUnit, error) {
log.Printf("Extracting XA for %q with %d inputs", info.Target, len(info.Inputs))
if err := c.checkAction(ctx, info); err != nil {
return nil, err
}
// Construct the basic compilation.
cu := &apb.CompilationUnit{
VName: &spb.VName{
Language: c.Language,
Corpus: c.Corpus,
},
Argument: info.Arguments,
}
// Capture the primary output path. Although the action has room for
// multiple outputs, we expect only one to be set in practice. It's
// harmless if there are more, though, so don't fail for that.
if len(info.Outputs) > 0 {
cu.OutputKey = info.Outputs[0]
}
// Capture environment variables.
for name, value := range info.Environment {
if c.checkEnv(name, value) {
cu.Environment = append(cu.Environment, &apb.CompilationUnit_Env{
Name: name,
Value: value,
})
}
}
// Capture the build system details.
if err := SetTarget(info.Target, info.Rule, cu); err != nil {
log.Printf("ERROR: Adding build details: %v", err)
}
// Load and populate file contents and required inputs. First scan the
// inputs and filter out which ones we actually want to keep by path
// inspection; then load the contents concurrently.
sort.Strings(info.Inputs) // ensure a consistent order
inputs := c.classifyInputs(info, cu)
start := time.Now()
if err := c.fetchInputs(ctx, inputs, func(i int, r io.Reader) error {
return file(cu.RequiredInput[i], r)
}); err != nil {
return nil, fmt.Errorf("reading input files failed: %v", err)
}
log.Printf("Finished reading required inputs [%v elapsed]", time.Since(start))
return cu, c.fixup(cu)
}
// fetchInputs concurrently fetches the contents of all the specified file
// paths. An open reader for each file is passed to the file callback along
// with its path's offset in the input slice. If the callback returns an error,
// that error is propagated.
func (c *Config) fetchInputs(ctx context.Context, paths []string, file func(int, io.Reader) error) error {
// Fetch concurrently. Each element of the proto slices is accessed by a
// single goroutine corresponding to its index.
throttle := make(chan struct{}, 256)
var g errgroup.Group
var fmu sync.Mutex // coordinates access into the file callback
for i, path := range paths {
i, path := i, path
g.Go(func() error {
throttle <- struct{}{}
defer func() { <-throttle }()
rc, err := c.openRead(ctx, path)
if err != nil {
log.Printf("ERROR: Reading input file: %v", err)
return err
}
defer rc.Close()
fmu.Lock()
defer fmu.Unlock()
return file(i, rc)
})
}
return g.Wait()
}
// classifyInputs updates unit to add required inputs for each matching path
// and to identify source inputs according to the rules of c. The filtered
// complete list of inputs paths is returned.
func (c *Config) classifyInputs(info *ActionInfo, unit *apb.CompilationUnit) []string {
var inputs, sourceFiles stringset.Set
for _, in := range info.Inputs {
path, ok := c.checkInput(in)
if ok {
inputs.Add(path)
if c.isSource(path) {
sourceFiles.Add(path)
c.logPrintf("Matched source file from inputs: %q", path)
}
vname, ok := c.Rules.Apply(path)
if !ok {
vname = &spb.VName{Corpus: c.Corpus, Path: path}
}
// Add the skeleton of a required input carrying the vname.
// File info (path, digest) are populated during fetch.
unit.RequiredInput = append(unit.RequiredInput, &apb.CompilationUnit_FileInput{
VName: vname,
Info: &apb.FileInfo{Path: path},
})
} else {
c.logPrintf("Excluding input file: %q", in)
}
}
for _, src := range info.Sources {
if inputs.Contains(src) {
c.logPrintf("Matched source file from action: %q", src)
sourceFiles.Add(src)
}
}
unit.SourceFile = sourceFiles.Elements()
log.Printf("Found %d required inputs, %d source files", len(inputs), len(sourceFiles))
return inputs.Elements()
}
// ActionInfo represents the action metadata relevant to the extraction process. | Outputs []string // output file paths
Sources []string // source file paths
Environment map[string]string // environment variables
Target string // build target name
Rule string // rule class name
// Paths in Sources are expected to be a subset of inputs. In particular
// the extractor will keep such a path only if it also appears in the
// Inputs, and has been selected by the other rules provided by the caller.
//
// Such paths, if there are any, are taken in addition to any source files
// identified by the extraction rules provided by the caller.
}
// Setenv updates the Environment field with the specified key-value pair.
func (a *ActionInfo) Setenv(key, value string) {
if a.Environment == nil {
a.Environment = map[string]string{key: value}
} else {
a.Environment[key] = value
}
}
// SpawnAction generates an *ActionInfo from a spawn action.
// It is an error if info does not contain a SpawnInfo.
func SpawnAction(info *xapb.ExtraActionInfo) (*ActionInfo, error) {
msg, err := proto.GetExtension(info, xapb.E_SpawnInfo_SpawnInfo)
if err != nil {
return nil, fmt.Errorf("extra action does not have SpawnInfo: %v", err)
}
si := msg.(*xapb.SpawnInfo)
ai := &ActionInfo{
Target: info.GetOwner(),
Arguments: si.Argument,
Inputs: si.InputFile,
Outputs: si.OutputFile,
}
for _, env := range si.Variable {
ai.Setenv(env.GetName(), env.GetValue())
}
return ai, nil
} | type ActionInfo struct {
Arguments []string // command-line arguments
Inputs []string // input file paths |
int_literal.rs | use crate::context::CompilerContext;
use crate::source::SourceOrigin;
use crate::{ast, program};
pub fn | (
expression: ast::IntLiteralExpr,
context: &mut CompilerContext,
) -> program::Expression {
program::Expression::new(
program::LiteralExpr {
value: program::LiteralValue::Int(expression.value),
location: SourceOrigin::Plain(expression.span),
},
context.program.types_mut(),
)
}
| compile_int_literal_expr |
pogress.py | from tqdm import tqdm,trange
from time import sleep
| for i in trange(20):
sleep(0.1)
pass
raise SystemExit | |
mod.rs | mod window_description;
pub use window_description::*;
mod window_event; |
mod cursor;
pub use cursor::*; | pub use window_event::*; |
executor.go | package loop
import (
"context"
"fmt"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/lightninglabs/lndclient"
"github.com/lightninglabs/loop/loopdb"
"github.com/lightninglabs/loop/sweep"
"github.com/lightningnetwork/lnd/queue"
)
// executorConfig contains executor configuration data.
type executorConfig struct {
lnd *lndclient.LndServices
sweeper *sweep.Sweeper
store loopdb.SwapStore
createExpiryTimer func(expiry time.Duration) <-chan time.Time
loopOutMaxParts uint32
cancelSwap func(ctx context.Context, details *outCancelDetails) error
}
// executor is responsible for executing swaps.
//
// TODO(roasbeef): rename to SubSwapper
type executor struct {
wg sync.WaitGroup
newSwaps chan genericSwap
currentHeight uint32
ready chan struct{}
executorConfig
}
// newExecutor returns a new swap executor instance.
func | (cfg *executorConfig) *executor {
return &executor{
executorConfig: *cfg,
newSwaps: make(chan genericSwap),
ready: make(chan struct{}),
}
}
// run starts the executor event loop. It accepts and executes new swaps,
// providing them with required config data.
func (s *executor) run(mainCtx context.Context,
statusChan chan<- SwapInfo) error {
var (
err error
blockEpochChan <-chan int32
blockErrorChan <-chan error
)
for {
blockEpochChan, blockErrorChan, err =
s.lnd.ChainNotifier.RegisterBlockEpochNtfn(mainCtx)
if err != nil {
if strings.Contains(err.Error(),
"in the process of starting") {
log.Warnf("LND chain notifier server not " +
"ready yet, retrying with delay")
// Give chain notifier some time to start and
// try to re-attempt block epoch subscription.
select {
case <-time.After(500 * time.Millisecond):
continue
case <-mainCtx.Done():
return err
}
}
return err
}
break
}
// Before starting, make sure we have an up to date block height.
// Otherwise we might reveal a preimage for a swap that is already
// expired.
log.Infof("Wait for first block ntfn")
var height int32
setHeight := func(h int32) {
height = h
atomic.StoreUint32(&s.currentHeight, uint32(h))
}
select {
case h := <-blockEpochChan:
setHeight(h)
case err := <-blockErrorChan:
return err
case <-mainCtx.Done():
return mainCtx.Err()
}
// Start main event loop.
log.Infof("Starting event loop at height %v", height)
// Signal that executor being ready with an up to date block height.
close(s.ready)
// Use a map to administer the individual notification queues for the
// swaps.
blockEpochQueues := make(map[int]*queue.ConcurrentQueue)
// On exit, stop all queue goroutines.
defer func() {
for _, queue := range blockEpochQueues {
queue.Stop()
}
}()
swapDoneChan := make(chan int)
nextSwapID := 0
for {
select {
case newSwap := <-s.newSwaps:
queue := queue.NewConcurrentQueue(10)
queue.Start()
swapID := nextSwapID
blockEpochQueues[swapID] = queue
s.wg.Add(1)
go func() {
defer s.wg.Done()
err := newSwap.execute(mainCtx, &executeConfig{
statusChan: statusChan,
sweeper: s.sweeper,
blockEpochChan: queue.ChanOut(),
timerFactory: s.executorConfig.createExpiryTimer,
loopOutMaxParts: s.executorConfig.loopOutMaxParts,
cancelSwap: s.executorConfig.cancelSwap,
}, height)
if err != nil && err != context.Canceled {
log.Errorf("Execute error: %v", err)
}
select {
case swapDoneChan <- swapID:
case <-mainCtx.Done():
}
}()
nextSwapID++
case doneID := <-swapDoneChan:
queue, ok := blockEpochQueues[doneID]
if !ok {
return fmt.Errorf(
"swap id %v not found in queues",
doneID)
}
queue.Stop()
delete(blockEpochQueues, doneID)
case h := <-blockEpochChan:
setHeight(h)
for _, queue := range blockEpochQueues {
select {
case queue.ChanIn() <- h:
case <-mainCtx.Done():
return mainCtx.Err()
}
}
case err := <-blockErrorChan:
return fmt.Errorf("block error: %v", err)
case <-mainCtx.Done():
return mainCtx.Err()
}
}
}
// initiateSwap delivers a new swap to the executor main loop.
func (s *executor) initiateSwap(ctx context.Context,
swap genericSwap) {
select {
case s.newSwaps <- swap:
case <-ctx.Done():
return
}
}
// height returns the current height known to the swap server.
func (s *executor) height() int32 {
return int32(atomic.LoadUint32(&s.currentHeight))
}
// waitFinished waits for all swap goroutines to finish.
func (s *executor) waitFinished() {
s.wg.Wait()
}
| newExecutor |
index.ts | export { default } from './component'; | export type { ProjectGalleryProps } from './types'; |
|
tempSegHdr1.js | function initializetempSegHdr1() {
boxRefSegHdr1 = new kony.ui.Box({
"focusSkin": "hBoxSegHdr",
"id": "boxRefSegHdr1",
| "isVisible": true,
"orientation": constants.BOX_LAYOUT_HORIZONTAL,
"position": constants.BOX_POSITION_AS_NORMAL,
"skin": "hBoxSegHdr1"
}, {
"containerWeight": 100,
"layoutAlignment": constants.BOX_LAYOUT_ALIGN_FROM_LEFT,
"layoutType": constants.CONTAINER_LAYOUT_BOX,
"margin": [0, 0, 0, 0],
"marginInPixel": true,
"padding": [0, 0, 0, 0],
"paddingInPixel": true,
"percent": true,
"vExpand": false,
"widgetAlignment": constants.WIDGET_ALIGN_TOP_LEFT
}, {});
var vbox120944992024383 = new kony.ui.Box({
"id": "vbox120944992024383",
"isVisible": true,
"orientation": constants.BOX_LAYOUT_VERTICAL,
"position": constants.BOX_POSITION_AS_NORMAL,
"skin": "vBoxTrans"
}, {
"containerWeight": 100,
"layoutType": constants.CONTAINER_LAYOUT_BOX,
"margin": [0, 0, 0, 0],
"marginInPixel": true,
"padding": [0, 0, 0, 0],
"paddingInPixel": true,
"vExpand": false,
"widgetAlignment": constants.WIDGET_ALIGN_TOP_LEFT
}, {});
var lblSecHdr1 = new kony.ui.Label({
"id": "lblSecHdr1",
"isVisible": true,
"skin": "lblSegScHdrBlkBG",
"text": "Label"
}, {
"containerWeight": 100,
"contentAlignment": constants.CONTENT_ALIGN_MIDDLE_LEFT,
"hExpand": true,
"margin": [0, 0, 0, 0],
"marginInPixel": true,
"padding": [1, 0, 1, 0],
"paddingInPixel": false,
"vExpand": false,
"widgetAlignment": constants.WIDGET_ALIGN_MIDDLE_LEFT
}, {
"textCopyable": false,
"wrapping": constants.WIDGET_TEXT_WORD_WRAP
});
var lblSecHdr2 = new kony.ui.Label({
"id": "lblSecHdr2",
"isVisible": true,
"skin": "lblSegScHdrBlkBG",
"text": "Label"
}, {
"containerWeight": 100,
"contentAlignment": constants.CONTENT_ALIGN_MIDDLE_LEFT,
"hExpand": true,
"margin": [0, 0, 0, 0],
"marginInPixel": true,
"padding": [0, 0, 2, 0],
"paddingInPixel": false,
"vExpand": false,
"widgetAlignment": constants.WIDGET_ALIGN_MIDDLE_LEFT
}, {
"textCopyable": false,
"wrapping": constants.WIDGET_TEXT_WORD_WRAP
});
vbox120944992024383.add(lblSecHdr1, lblSecHdr2);
boxRefSegHdr1.add(vbox120944992024383);
} | |
manage.py | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
CLI interface for nova management.
"""
import collections
import functools
import os
import re
import sys
import time
import traceback
import typing as ty
from urllib import parse as urlparse
from dateutil import parser as dateutil_parser
from keystoneauth1 import exceptions as ks_exc
from neutronclient.common import exceptions as neutron_client_exc
from os_brick.initiator import connector
import os_resource_classes as orc
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import uuidutils
import prettytable
from sqlalchemy.engine import url as sqla_url
from nova.cmd import common as cmd_common
from nova.compute import api
from nova.compute import instance_actions
from nova.compute import rpcapi
import nova.conf
from nova import config
from nova import context
from nova.db import constants as db_const
from nova.db.main import api as db
from nova.db import migration
from nova import exception
from nova.i18n import _
from nova.network import constants
from nova.network import neutron as neutron_api
from nova import objects
from nova.objects import block_device as block_device_obj
from nova.objects import compute_node as compute_node_obj
from nova.objects import fields as obj_fields
from nova.objects import host_mapping as host_mapping_obj
from nova.objects import instance as instance_obj
from nova.objects import instance_mapping as instance_mapping_obj
from nova.objects import pci_device as pci_device_obj
from nova.objects import quotas as quotas_obj
from nova.objects import virtual_interface as virtual_interface_obj
from nova import rpc
from nova.scheduler.client import report
from nova.scheduler import utils as scheduler_utils
from nova import utils
from nova import version
from nova.virt.libvirt import machine_type_utils
from nova.volume import cinder
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
# Keep this list sorted and one entry per line for readability.
_EXTRA_DEFAULT_LOG_LEVELS = [
'nova=ERROR',
'oslo_concurrency=INFO',
'oslo_db=INFO',
'oslo_policy=INFO',
'oslo.privsep=ERROR',
'os_brick=ERROR',
]
# Consts indicating whether allocations need to be healed by creating them or
# by updating existing allocations.
_CREATE = 'create'
_UPDATE = 'update'
# Decorators for actions
args = cmd_common.args
action_description = cmd_common.action_description
def mask_passwd_in_url(url):
parsed = urlparse.urlparse(url)
safe_netloc = re.sub(':.*@', ':****@', parsed.netloc)
new_parsed = urlparse.ParseResult(
parsed.scheme, safe_netloc,
parsed.path, parsed.params,
parsed.query, parsed.fragment)
return urlparse.urlunparse(new_parsed)
def format_dict(dct, dict_property="Property", dict_value='Value',
sort_key=None):
"""Print a `dict` as a table of two columns.
:param dct: `dict` to print
:param dict_property: name of the first column
:param dict_value: header label for the value (second) column
:param sort_key: key used for sorting the dict
"""
pt = prettytable.PrettyTable([dict_property, dict_value])
pt.align = 'l'
for k, v in sorted(dct.items(), key=sort_key):
# convert dict to str to check length
if isinstance(v, dict):
v = str(v)
# if value has a newline, add in multiple rows
# e.g. fault with stacktrace
if v and isinstance(v, str) and r'\n' in v:
lines = v.strip().split(r'\n')
col1 = k
for line in lines:
pt.add_row([col1, line])
col1 = ''
else:
pt.add_row([k, v])
return encodeutils.safe_encode(pt.get_string()).decode()
class DbCommands(object):
"""Class for managing the main database."""
# NOTE(danms): These functions are called with a DB context and a
# count, which is the maximum batch size requested by the
# user. They must be idempotent. At most $count records should be
# migrated. The function must return a tuple of (found, done). The
# found value indicates how many unmigrated/candidate records existed in
# the database prior to the migration (either total, or up to the
# $count limit provided), and a nonzero found value may tell the user
# that there is still work to do. The done value indicates whether
# or not any records were actually migrated by the function. Thus
# if both (found, done) are nonzero, work was done and some work
# remains. If found is nonzero and done is zero, some records are
# not migratable (or don't need migrating), but all migrations that can
# complete have finished.
# NOTE(stephenfin): These names must be unique
online_migrations = (
# Added in Pike
quotas_obj.migrate_quota_limits_to_api_db,
# Added in Pike
quotas_obj.migrate_quota_classes_to_api_db,
# Added in Queens
db.migration_migrate_to_uuid,
# Added in Queens
block_device_obj.BlockDeviceMapping.populate_uuids,
# Added in Rocky
# NOTE(tssurya): This online migration is going to be backported to
# Queens and Pike since instance.avz of instances before Pike
# need to be populated if it was not specified during boot time.
instance_obj.populate_missing_availability_zones,
# Added in Rocky
instance_mapping_obj.populate_queued_for_delete,
# Added in Stein
compute_node_obj.migrate_empty_ratio,
# Added in Stein
virtual_interface_obj.fill_virtual_interface_list,
# Added in Stein
instance_mapping_obj.populate_user_id,
# Added in Victoria
pci_device_obj.PciDevice.populate_dev_uuids,
)
@args('--local_cell', action='store_true',
help='Only sync db in the local cell: do not attempt to fan-out '
'to all cells')
@args('version', metavar='VERSION', nargs='?', help='Database version')
def sync(self, version=None, local_cell=False):
"""Sync the database up to the most recent version."""
if not local_cell:
ctxt = context.RequestContext()
# NOTE(mdoff): Multiple cells not yet implemented. Currently
# fanout only looks for cell0.
try:
cell_mapping = objects.CellMapping.get_by_uuid(
ctxt, objects.CellMapping.CELL0_UUID,
)
with context.target_cell(ctxt, cell_mapping) as cctxt:
migration.db_sync(version, context=cctxt)
except exception.CellMappingNotFound:
msg = _(
'WARNING: cell0 mapping not found - not syncing cell0.'
)
print(msg)
except Exception as e:
msg = _(
'ERROR: Could not access cell0.\n'
'Has the nova_api database been created?\n'
'Has the nova_cell0 database been created?\n'
'Has "nova-manage api_db sync" been run?\n'
'Has "nova-manage cell_v2 map_cell0" been run?\n'
'Is [api_database]/connection set in nova.conf?\n'
'Is the cell0 database connection URL correct?\n'
'Error: %s'
)
print(msg % str(e))
return 1
return migration.db_sync(version)
def version(self):
"""Print the current database version."""
print(migration.db_version())
@args('--max_rows', type=int, metavar='<number>', dest='max_rows',
help='Maximum number of deleted rows to archive. Defaults to 1000. '
'Note that this number does not include the corresponding '
'rows, if any, that are removed from the API database for '
'deleted instances.')
@args('--before', metavar='<date>',
help=('Archive rows that have been deleted before this date. '
'Accepts date strings in the default format output by the '
'``date`` command, as well as ``YYYY-MM-DD [HH:mm:ss]``.'))
@args('--verbose', action='store_true', dest='verbose', default=False,
help='Print how many rows were archived per table.')
@args('--until-complete', action='store_true', dest='until_complete',
default=False,
help=('Run continuously until all deleted rows are archived. Use '
'max_rows as a batch size for each iteration.'))
@args('--purge', action='store_true', dest='purge', default=False,
help='Purge all data from shadow tables after archive completes')
@args('--all-cells', action='store_true', dest='all_cells',
default=False, help='Run command across all cells.')
@args('--task-log', action='store_true', dest='task_log', default=False,
help=('Also archive ``task_log`` table records. Note that '
'``task_log`` records are never deleted, so archiving them '
'will move all of the ``task_log`` records up to now into the '
'shadow tables. It is recommended to also specify the '
'``--before`` option to avoid races for those consuming '
'``task_log`` record data via the '
'``/os-instance_usage_audit_log`` API (example: Telemetry).'))
@args('--sleep', type=int, metavar='<seconds>', dest='sleep',
help='The amount of time in seconds to sleep between batches when '
'``--until-complete`` is used. Defaults to 0.')
def archive_deleted_rows(
self, max_rows=1000, verbose=False,
until_complete=False, purge=False,
before=None, all_cells=False, task_log=False, sleep=0,
):
"""Move deleted rows from production tables to shadow tables.
Returns 0 if nothing was archived, 1 if some number of rows were
archived, 2 if max_rows is invalid, 3 if no connection could be
established to the API DB, 4 if before date is invalid. If automating,
this should be run continuously while the result
is 1, stopping at 0.
"""
max_rows = int(max_rows)
if max_rows < 0:
print(_("Must supply a positive value for max_rows"))
return 2
if max_rows > db_const.MAX_INT:
print(_('max rows must be <= %(max_value)d') %
{'max_value': db_const.MAX_INT})
return 2
ctxt = context.get_admin_context()
try:
# NOTE(tssurya): This check has been added to validate if the API
# DB is reachable or not as this is essential for purging the
# related API database records of the deleted instances.
cell_mappings = objects.CellMappingList.get_all(ctxt)
except db_exc.CantStartEngineError:
print(_('Failed to connect to API DB so aborting this archival '
'attempt. Please check your config file to make sure that '
'[api_database]/connection is set and run this '
'command again.'))
return 3
if before:
try:
before_date = dateutil_parser.parse(before, fuzzy=True)
except ValueError as e:
print(_('Invalid value for --before: %s') % e)
return 4
else:
before_date = None
table_to_rows_archived = {}
if until_complete and verbose:
sys.stdout.write(_('Archiving') + '..') # noqa
interrupt = False
if all_cells:
# Sort first by cell name, then by table:
# +--------------------------------+-------------------------+
# | Table | Number of Rows Archived |
# +--------------------------------+-------------------------+
# | cell0.block_device_mapping | 1 |
# | cell1.block_device_mapping | 1 |
# | cell1.instance_actions | 2 |
# | cell1.instance_actions_events | 2 |
# | cell2.block_device_mapping | 1 |
# | cell2.instance_actions | 2 |
# | cell2.instance_actions_events | 2 |
# ...
def sort_func(item):
cell_name, table = item[0].split('.')
return cell_name, table
print_sort_func = sort_func
else:
cell_mappings = [None]
print_sort_func = None
total_rows_archived = 0
for cell_mapping in cell_mappings:
# NOTE(Kevin_Zheng): No need to calculate limit for each
# cell if until_complete=True.
# We need not adjust max rows to avoid exceeding a specified total
# limit because with until_complete=True, we have no total limit.
if until_complete:
max_rows_to_archive = max_rows
elif max_rows > total_rows_archived:
# We reduce the max rows to archive based on what we've
# archived so far to avoid potentially exceeding the specified
# total limit.
max_rows_to_archive = max_rows - total_rows_archived
else:
break
# If all_cells=False, cell_mapping is None
with context.target_cell(ctxt, cell_mapping) as cctxt:
cell_name = cell_mapping.name if cell_mapping else None
try:
rows_archived = self._do_archive(
table_to_rows_archived,
cctxt,
max_rows_to_archive,
until_complete,
verbose,
before_date,
cell_name,
task_log,
sleep)
except KeyboardInterrupt:
interrupt = True
break
# TODO(melwitt): Handle skip/warn for unreachable cells. Note
# that cell_mappings = [None] if not --all-cells
total_rows_archived += rows_archived
if until_complete and verbose:
if interrupt:
print('.' + _('stopped')) # noqa
else:
print('.' + _('complete')) # noqa
if verbose:
if table_to_rows_archived:
print(format_dict(
table_to_rows_archived,
dict_property=_('Table'),
dict_value=_('Number of Rows Archived'),
sort_key=print_sort_func,
))
else:
print(_('Nothing was archived.'))
if table_to_rows_archived and purge:
if verbose:
print(_('Rows were archived, running purge...'))
self.purge(purge_all=True, verbose=verbose, all_cells=all_cells)
# NOTE(danms): Return nonzero if we archived something
return int(bool(table_to_rows_archived))
def _do_archive(
self, table_to_rows_archived, cctxt, max_rows,
until_complete, verbose, before_date, cell_name, task_log, sleep,
):
"""Helper function for archiving deleted rows for a cell.
This will archive deleted rows for a cell database and remove the
associated API database records for deleted instances.
:param table_to_rows_archived: Dict tracking the number of rows
archived by <cell_name>.<table name>. Example:
{'cell0.instances': 2,
'cell1.instances': 5}
:param cctxt: Cell-targeted nova.context.RequestContext if archiving
across all cells
:param max_rows: Maximum number of deleted rows to archive
:param until_complete: Whether to run continuously until all deleted
rows are archived
:param verbose: Whether to print how many rows were archived per table
:param before_date: Archive rows that were deleted before this date
:param cell_name: Name of the cell or None if not archiving across all
cells
:param task_log: Whether to archive task_log table rows
:param sleep: The amount of time in seconds to sleep between batches
when ``until_complete`` is True.
"""
ctxt = context.get_admin_context()
while True:
run, deleted_instance_uuids, total_rows_archived = \
db.archive_deleted_rows(
cctxt, max_rows, before=before_date, task_log=task_log)
for table_name, rows_archived in run.items():
if cell_name:
table_name = cell_name + '.' + table_name
table_to_rows_archived.setdefault(table_name, 0)
table_to_rows_archived[table_name] += rows_archived
if deleted_instance_uuids:
table_to_rows_archived.setdefault(
'API_DB.instance_mappings', 0)
table_to_rows_archived.setdefault(
'API_DB.request_specs', 0)
table_to_rows_archived.setdefault(
'API_DB.instance_group_member', 0)
deleted_mappings = objects.InstanceMappingList.destroy_bulk(
ctxt, deleted_instance_uuids)
table_to_rows_archived[
'API_DB.instance_mappings'] += deleted_mappings
deleted_specs = objects.RequestSpec.destroy_bulk(
ctxt, deleted_instance_uuids)
table_to_rows_archived[
'API_DB.request_specs'] += deleted_specs
deleted_group_members = (
objects.InstanceGroup.destroy_members_bulk(
ctxt, deleted_instance_uuids))
table_to_rows_archived[
'API_DB.instance_group_member'] += deleted_group_members
# If we're not archiving until there is nothing more to archive, we
# have reached max_rows in this cell DB or there was nothing to
# archive.
if not until_complete or not run:
break
if verbose:
sys.stdout.write('.')
# Optionally sleep between batches to throttle the archiving.
time.sleep(sleep)
return total_rows_archived
@args('--before', metavar='<before>', dest='before',
help='If specified, purge rows from shadow tables that are older '
'than this. Accepts date strings in the default format output '
'by the ``date`` command, as well as ``YYYY-MM-DD '
'[HH:mm:ss]``.')
@args('--all', dest='purge_all', action='store_true',
help='Purge all rows in the shadow tables')
@args('--verbose', dest='verbose', action='store_true', default=False,
help='Print information about purged records')
@args('--all-cells', dest='all_cells', action='store_true', default=False,
help='Run against all cell databases')
def purge(self, before=None, purge_all=False, verbose=False,
all_cells=False):
if before is None and purge_all is False:
print(_('Either --before or --all is required'))
return 1
if before:
try:
before_date = dateutil_parser.parse(before, fuzzy=True)
except ValueError as e:
print(_('Invalid value for --before: %s') % e)
return 2
else:
before_date = None
def status(msg):
if verbose:
print('%s: %s' % (identity, msg))
deleted = 0
admin_ctxt = context.get_admin_context()
if all_cells:
try:
cells = objects.CellMappingList.get_all(admin_ctxt)
except db_exc.DBError:
print(_('Unable to get cell list from API DB. '
'Is it configured?'))
return 4
for cell in cells:
identity = _('Cell %s') % cell.identity
with context.target_cell(admin_ctxt, cell) as cctxt:
deleted += db.purge_shadow_tables(
cctxt, before_date, status_fn=status)
else:
identity = _('DB')
deleted = db.purge_shadow_tables(
admin_ctxt, before_date, status_fn=status)
if deleted:
return 0
else:
return 3
def _run_migration(self, ctxt, max_count):
ran = 0
exceptions = False
migrations = {}
for migration_meth in self.online_migrations:
count = max_count - ran
try:
found, done = migration_meth(ctxt, count)
except Exception:
msg = (_("Error attempting to run %(method)s") % dict(
method=migration_meth))
print(msg)
LOG.exception(msg)
exceptions = True
found = done = 0
name = migration_meth.__name__
if found:
print(_('%(total)i rows matched query %(meth)s, %(done)i '
'migrated') % {'total': found,
'meth': name,
'done': done})
# This is the per-migration method result for this batch, and
# _run_migration will either continue on to the next migration,
# or stop if up to this point we've processed max_count of
# records across all migration methods.
migrations[name] = found, done
if max_count is not None:
ran += done
if ran >= max_count:
break
return migrations, exceptions
@args('--max-count', metavar='<number>', dest='max_count',
help='Maximum number of objects to consider')
def online_data_migrations(self, max_count=None):
ctxt = context.get_admin_context()
if max_count is not None:
try:
max_count = int(max_count)
except ValueError:
max_count = -1
unlimited = False
if max_count < 1:
print(_('Must supply a positive value for max_number'))
return 127
else:
unlimited = True
max_count = 50
print(_('Running batches of %i until complete') % max_count)
ran = None
migration_info = {}
exceptions = False
while ran is None or ran != 0:
migrations, exceptions = self._run_migration(ctxt, max_count)
ran = 0
# For each batch of migration method results, build the cumulative
# set of results.
for name in migrations:
migration_info.setdefault(name, (0, 0))
migration_info[name] = (
migration_info[name][0] + migrations[name][0],
migration_info[name][1] + migrations[name][1],
)
ran += migrations[name][1]
if not unlimited:
break
t = prettytable.PrettyTable([_('Migration'),
_('Total Needed'), # Really: Total Found
_('Completed')])
for name in sorted(migration_info.keys()):
info = migration_info[name]
t.add_row([name, info[0], info[1]])
print(t)
# NOTE(imacdonn): In the "unlimited" case, the loop above will only
# terminate when all possible migrations have been effected. If we're
# still getting exceptions, there's a problem that requires
# intervention. In the max-count case, exceptions are only considered
# fatal if no work was done by any other migrations ("not ran"),
# because otherwise work may still remain to be done, and that work
# may resolve dependencies for the failing migrations.
if exceptions and (unlimited or not ran):
print(_("Some migrations failed unexpectedly. Check log for "
"details."))
return 2
# TODO(mriedem): Potentially add another return code for
# "there are more migrations, but not completable right now"
return ran and 1 or 0
class ApiDbCommands(object):
"""Class for managing the api database."""
def __init__(self):
pass
@args('version', metavar='VERSION', nargs='?', help='Database version')
def sync(self, version=None):
"""Sync the database up to the most recent version."""
return migration.db_sync(version, database='api')
def version(self):
"""Print the current database version."""
print(migration.db_version(database='api'))
class CellV2Commands(object):
"""Commands for managing cells v2."""
def _validate_transport_url(self, transport_url, warn_about_none=True):
if not transport_url:
if not CONF.transport_url:
if warn_about_none:
print(_(
'Must specify --transport-url if '
'[DEFAULT]/transport_url is not set in the '
'configuration file.'))
return None
print(_('--transport-url not provided in the command line, '
'using the value [DEFAULT]/transport_url from the '
'configuration file'))
transport_url = CONF.transport_url
try:
messaging.TransportURL.parse(conf=CONF,
url=objects.CellMapping.format_mq_url(
transport_url))
except (messaging.InvalidTransportURL, ValueError) as e:
print(_('Invalid transport URL: %s') % str(e))
return None
return transport_url
def _validate_database_connection(
self, database_connection, warn_about_none=True):
if not database_connection:
if not CONF.database.connection:
if warn_about_none:
print(_(
'Must specify --database_connection if '
'[database]/connection is not set in the '
'configuration file.'))
return None
print(_('--database_connection not provided in the command line, '
'using the value [database]/connection from the '
'configuration file'))
return CONF.database.connection
return database_connection
def _non_unique_transport_url_database_connection_checker(self, ctxt,
cell_mapping, transport_url, database_connection):
for cell in objects.CellMappingList.get_all(ctxt):
if cell_mapping and cell.uuid == cell_mapping.uuid:
# If we're looking for a specific cell, then don't check
# that one for same-ness to allow idempotent updates
continue
if (cell.database_connection == database_connection or
cell.transport_url == transport_url):
print(_('The specified transport_url and/or '
'database_connection combination already exists '
'for another cell with uuid %s.') % cell.uuid)
return True
return False
@args('--transport-url', metavar='<transport_url>', dest='transport_url',
help='The transport url for the cell message queue')
def simple_cell_setup(self, transport_url=None):
"""Simple cellsv2 setup.
This simplified command is for use by existing non-cells users to
configure the default environment. Returns 0 if setup is completed (or
has already been done) and 1 if no hosts are reporting (and this cannot
be mapped).
"""
transport_url = self._validate_transport_url(transport_url)
if not transport_url:
return 1
ctxt = context.RequestContext()
try:
cell0_mapping = self._map_cell0()
except db_exc.DBDuplicateEntry:
print(_('Cell0 is already setup'))
cell0_mapping = objects.CellMapping.get_by_uuid(
ctxt, objects.CellMapping.CELL0_UUID)
# Run migrations so cell0 is usable
with context.target_cell(ctxt, cell0_mapping) as cctxt:
try:
migration.db_sync(None, context=cctxt)
except db_exc.DBError as ex:
print(_('Unable to sync cell0 schema: %s') % ex)
cell_uuid = self._map_cell_and_hosts(transport_url)
if cell_uuid is None:
# There are no compute hosts which means no cell_mapping was
# created. This should also mean that there are no instances.
return 1
self.map_instances(cell_uuid)
return 0
@args('--database_connection',
metavar='<database_connection>',
help='The database connection url for cell0. '
'This is optional. If not provided, a standard database '
'connection will be used based on the main database connection '
'from the Nova configuration.'
)
def map_cell0(self, database_connection=None):
"""Create a cell mapping for cell0.
cell0 is used for instances that have not been scheduled to any cell.
This generally applies to instances that have encountered an error
before they have been scheduled.
This command creates a cell mapping for this special cell which
requires a database to store the instance data.
Returns 0 if cell0 created successfully or already setup.
"""
try:
self._map_cell0(database_connection=database_connection)
except db_exc.DBDuplicateEntry:
print(_('Cell0 is already setup'))
return 0
def _map_cell0(self, database_connection=None):
"""Faciliate creation of a cell mapping for cell0.
See map_cell0 for more.
"""
def cell0_default_connection():
# If no database connection is provided one is generated
# based on the database connection url.
# The cell0 database will use the same database scheme and
# netloc as the main database, with a related path.
# NOTE(sbauza): The URL has to be RFC1738 compliant in order to
# be usable by sqlalchemy.
connection = CONF.database.connection
# sqlalchemy has a nice utility for parsing database connection
# URLs so we use that here to get the db name so we don't have to
# worry about parsing and splitting a URL which could have special
# characters in the password, which makes parsing a nightmare.
url = sqla_url.make_url(connection)
# TODO(gibi): remove hasattr() conditional in favor of "url.set()"
# when SQLAlchemy 1.4 is the minimum version in requirements
if hasattr(url, "set"):
url = url.set(database=url.database + '_cell0')
else:
# TODO(zzzeek): remove when SQLAlchemy 1.4
# is the minimum version in requirements
url.database = url.database + '_cell0'
return urlparse.unquote(str(url))
dbc = database_connection or cell0_default_connection()
ctxt = context.RequestContext()
# A transport url of 'none://' is provided for cell0. RPC should not
# be used to access cell0 objects. Cells transport switching will
# ignore any 'none' transport type.
cell_mapping = objects.CellMapping(
ctxt, uuid=objects.CellMapping.CELL0_UUID, name="cell0",
transport_url="none:///",
database_connection=dbc)
cell_mapping.create()
return cell_mapping
def _get_and_map_instances(self, ctxt, cell_mapping, limit, marker):
filters = {}
with context.target_cell(ctxt, cell_mapping) as cctxt:
instances = objects.InstanceList.get_by_filters(
cctxt.elevated(read_deleted='yes'), filters,
sort_key='created_at', sort_dir='asc', limit=limit,
marker=marker)
for instance in instances:
try:
mapping = objects.InstanceMapping(ctxt)
mapping.instance_uuid = instance.uuid
mapping.cell_mapping = cell_mapping
mapping.project_id = instance.project_id
mapping.user_id = instance.user_id
mapping.create()
except db_exc.DBDuplicateEntry:
continue
if len(instances) == 0 or len(instances) < limit:
# We've hit the end of the instances table
marker = None
else:
marker = instances[-1].uuid
return marker
@args('--cell_uuid', metavar='<cell_uuid>', dest='cell_uuid',
required=True,
help='Unmigrated instances will be mapped to the cell with the '
'uuid provided.')
@args('--max-count', metavar='<max_count>', dest='max_count',
help='Maximum number of instances to map. If not set, all instances '
'in the cell will be mapped in batches of 50. If you have a '
'large number of instances, consider specifying a custom value '
'and run the command until it exits with 0.')
@args('--reset', action='store_true', dest='reset_marker',
help='The command will start from the beginning as opposed to the '
'default behavior of starting from where the last run '
'finished')
def map_instances(self, cell_uuid, max_count=None, reset_marker=None):
"""Map instances into the provided cell.
Instances in the nova database of the provided cell (nova database
info is obtained from the nova-api database) will be queried from
oldest to newest and if unmapped, will be mapped to the provided cell.
A max-count can be set on the number of instance to map in a single
run. Repeated runs of the command will start from where the last run
finished so it is not necessary to increase max-count to finish. A
reset option can be passed which will reset the marker, thus making the
command start from the beginning as opposed to the default behavior of
starting from where the last run finished. An exit code of 0 indicates
that all instances have been mapped.
"""
# NOTE(stephenfin): The support for batching in this command relies on
# a bit of a hack. We initially process N instance-cell mappings, where
# N is the value of '--max-count' if provided else 50. To ensure we
# can continue from N on the next iteration, we store a instance-cell
# mapping object with a special name and the UUID of the last
# instance-cell mapping processed (N - 1) in munged form. On the next
# iteration, we search for the special name and unmunge the UUID to
# pick up where we left off. This is done until all mappings are
# processed. The munging is necessary as there's a unique constraint on
# the UUID field and we need something reversable. For more
# information, see commit 9038738d0.
if max_count is not None:
try:
max_count = int(max_count)
except ValueError:
max_count = -1
map_all = False
if max_count < 1:
print(_('Must supply a positive value for max-count'))
return 127
else:
map_all = True
max_count = 50
ctxt = context.RequestContext()
marker_project_id = 'INSTANCE_MIGRATION_MARKER'
# Validate the cell exists, this will raise if not
cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
# Check for a marker from a previous run
marker_mapping = objects.InstanceMappingList.get_by_project_id(ctxt,
marker_project_id)
if len(marker_mapping) == 0:
marker = None
else:
# There should be only one here
marker = marker_mapping[0].instance_uuid.replace(' ', '-')
if reset_marker:
marker = None
marker_mapping[0].destroy()
next_marker = True
while next_marker is not None:
next_marker = self._get_and_map_instances(ctxt, cell_mapping,
max_count, marker)
marker = next_marker
if not map_all:
break
if next_marker:
# Don't judge me. There's already an InstanceMapping with this UUID
# so the marker needs to be non destructively modified.
next_marker = next_marker.replace('-', ' ')
# This is just the marker record, so set user_id to the special
# marker name as well.
objects.InstanceMapping(ctxt, instance_uuid=next_marker,
project_id=marker_project_id,
user_id=marker_project_id).create()
return 1
return 0
def _map_cell_and_hosts(self, transport_url, name=None, verbose=False):
ctxt = context.RequestContext()
cell_mapping_uuid = cell_mapping = None
# First, try to detect if a CellMapping has already been created
compute_nodes = objects.ComputeNodeList.get_all(ctxt)
if not compute_nodes:
print(_('No hosts found to map to cell, exiting.'))
return None
missing_nodes = set()
for compute_node in compute_nodes:
try:
host_mapping = objects.HostMapping.get_by_host(
ctxt, compute_node.host)
except exception.HostMappingNotFound:
missing_nodes.add(compute_node.host)
else:
if verbose:
print(_(
'Host %(host)s is already mapped to cell %(uuid)s'
) % {'host': host_mapping.host,
'uuid': host_mapping.cell_mapping.uuid})
# Re-using the existing UUID in case there is already a mapping
# NOTE(sbauza): There could be possibly multiple CellMappings
# if the operator provides another configuration file and moves
# the hosts to another cell v2, but that's not really something
# we should support.
cell_mapping_uuid = host_mapping.cell_mapping.uuid
if not missing_nodes:
print(_('All hosts are already mapped to cell(s).'))
return cell_mapping_uuid
# Create the cell mapping in the API database
if cell_mapping_uuid is not None:
cell_mapping = objects.CellMapping.get_by_uuid(
ctxt, cell_mapping_uuid)
if cell_mapping is None:
cell_mapping_uuid = uuidutils.generate_uuid()
cell_mapping = objects.CellMapping(
ctxt, uuid=cell_mapping_uuid, name=name,
transport_url=transport_url,
database_connection=CONF.database.connection)
cell_mapping.create()
# Pull the hosts from the cell database and create the host mappings
for compute_host in missing_nodes:
host_mapping = objects.HostMapping(
ctxt, host=compute_host, cell_mapping=cell_mapping)
host_mapping.create()
if verbose:
print(cell_mapping_uuid)
return cell_mapping_uuid
@args('--transport-url', metavar='<transport_url>', dest='transport_url',
help='The transport url for the cell message queue')
@args('--name', metavar='<cell_name>', help='The name of the cell')
@args('--verbose', action='store_true',
help='Output the cell mapping uuid for any newly mapped hosts.')
def map_cell_and_hosts(self, transport_url=None, name=None, verbose=False):
"""EXPERIMENTAL. Create a cell mapping and host mappings for a cell.
Users not dividing their cloud into multiple cells will be a single
cell v2 deployment and should specify:
nova-manage cell_v2 map_cell_and_hosts --config-file <nova.conf>
Users running multiple cells can add a cell v2 by specifying:
nova-manage cell_v2 map_cell_and_hosts --config-file <cell nova.conf>
"""
transport_url = self._validate_transport_url(transport_url)
if not transport_url:
return 1
self._map_cell_and_hosts(transport_url, name, verbose)
# online_data_migrations established a pattern of 0 meaning everything
# is done, 1 means run again to do more work. This command doesn't do
# partial work so 0 is appropriate.
return 0
@args('--uuid', metavar='<instance_uuid>', dest='uuid', required=True,
help=_('The instance UUID to verify'))
@args('--quiet', action='store_true', dest='quiet',
help=_('Do not print anything'))
def verify_instance(self, uuid, quiet=False):
"""Verify instance mapping to a cell.
This command is useful to determine if the cellsv2 environment is
properly setup, specifically in terms of the cell, host, and instance
mapping records required.
This prints one of three strings (and exits with a code) indicating
whether the instance is successfully mapped to a cell (0), is unmapped
due to an incomplete upgrade (1), unmapped due to normally transient
state (2), it is a deleted instance which has instance mapping (3),
or it is an archived instance which still has an instance mapping (4).
"""
def say(string):
if not quiet:
print(string)
ctxt = context.get_admin_context()
try:
mapping = objects.InstanceMapping.get_by_instance_uuid(
ctxt, uuid)
except exception.InstanceMappingNotFound:
say('Instance %s is not mapped to a cell '
'(upgrade is incomplete) or instance '
'does not exist' % uuid)
return 1
if mapping.cell_mapping is None:
say('Instance %s is not mapped to a cell' % uuid)
return 2
else:
with context.target_cell(ctxt, mapping.cell_mapping) as cctxt:
try:
instance = objects.Instance.get_by_uuid(cctxt, uuid)
except exception.InstanceNotFound:
try:
el_ctx = cctxt.elevated(read_deleted='yes')
instance = objects.Instance.get_by_uuid(el_ctx, uuid)
# instance is deleted
if instance:
say('The instance with uuid %s has been deleted.'
% uuid)
say('Execute '
'`nova-manage db archive_deleted_rows` '
'command to archive this deleted '
'instance and remove its instance_mapping.')
return 3
except exception.InstanceNotFound:
# instance is archived
say('The instance with uuid %s has been archived.'
% uuid)
say('However its instance_mapping remains.')
return 4
# instance is alive and mapped to a cell
say('Instance %s is in cell: %s (%s)' % (
uuid,
mapping.cell_mapping.name,
mapping.cell_mapping.uuid))
return 0
@args('--cell_uuid', metavar='<cell_uuid>', dest='cell_uuid',
help='If provided only this cell will be searched for new hosts to '
'map.')
@args('--verbose', action='store_true',
help=_('Provide detailed output when discovering hosts.'))
@args('--strict', action='store_true',
help=_('Considered successful (exit code 0) only when an unmapped '
'host is discovered. Any other outcome will be considered a '
'failure (non-zero exit code).'))
@args('--by-service', action='store_true', default=False,
dest='by_service',
help=_('Discover hosts by service instead of compute node'))
def discover_hosts(self, cell_uuid=None, verbose=False, strict=False,
by_service=False):
"""Searches cells, or a single cell, and maps found hosts.
When a new host is added to a deployment it will add a service entry
to the db it's configured to use. This command will check the db for
each cell, or a single one if passed in, and map any hosts which are
not currently mapped. If a host is already mapped nothing will be done.
This command should be run once after all compute hosts have been
deployed and should not be run in parallel. When run in parallel,
the commands will collide with each other trying to map the same hosts
in the database at the same time.
"""
def status_fn(msg):
if verbose:
print(msg)
ctxt = context.RequestContext()
try:
hosts = host_mapping_obj.discover_hosts(ctxt, cell_uuid, status_fn,
by_service)
except exception.HostMappingExists as exp:
print(_('ERROR: Duplicate host mapping was encountered. This '
'command should be run once after all compute hosts have '
'been deployed and should not be run in parallel. When '
'run in parallel, the commands will collide with each '
'other trying to map the same hosts in the database at '
'the same time. Error: %s') % exp)
return 2
# discover_hosts will return an empty list if no hosts are discovered
if strict:
return int(not hosts)
@action_description(
_("Add a new cell to nova API database. "
"DB and MQ urls can be provided directly "
"or can be taken from config. The result is cell uuid."))
@args('--name', metavar='<cell_name>', help=_('The name of the cell'))
@args('--database_connection', metavar='<database_connection>',
dest='database_connection',
help=_('The database url for the cell database'))
@args('--transport-url', metavar='<transport_url>', dest='transport_url',
help=_('The transport url for the cell message queue'))
@args('--verbose', action='store_true',
help=_('Output the uuid of the created cell'))
@args('--disabled', action='store_true',
help=_('To create a pre-disabled cell.'))
def create_cell(self, name=None, database_connection=None,
transport_url=None, verbose=False, disabled=False):
ctxt = context.get_context()
transport_url = self._validate_transport_url(transport_url)
if not transport_url:
return 1
database_connection = self._validate_database_connection(
database_connection)
if not database_connection:
return 1
if (self._non_unique_transport_url_database_connection_checker(ctxt,
None, transport_url, database_connection)):
return 2
cell_mapping_uuid = uuidutils.generate_uuid()
cell_mapping = objects.CellMapping(
ctxt,
uuid=cell_mapping_uuid, name=name,
transport_url=transport_url,
database_connection=database_connection,
disabled=disabled)
cell_mapping.create()
if verbose:
print(cell_mapping_uuid)
return 0
@args('--verbose', action='store_true',
help=_('Show sensitive details, such as passwords'))
def list_cells(self, verbose=False):
"""Lists the v2 cells in the deployment.
By default the cell name, uuid, disabled state, masked transport
URL and database connection details are shown. Use the --verbose
option to see transport URL and database connection with their
sensitive details.
"""
cell_mappings = objects.CellMappingList.get_all(
context.get_admin_context())
field_names = [_('Name'), _('UUID'), _('Transport URL'),
_('Database Connection'), _('Disabled')]
t = prettytable.PrettyTable(field_names)
for cell in sorted(cell_mappings,
# CellMapping.name is optional
key=lambda _cell: _cell.name or ''):
fields = [cell.name or '', cell.uuid]
if verbose:
fields.extend([cell.transport_url, cell.database_connection])
else:
fields.extend([
mask_passwd_in_url(cell.transport_url),
mask_passwd_in_url(cell.database_connection)])
fields.extend([cell.disabled])
t.add_row(fields)
print(t)
return 0
@args('--force', action='store_true', default=False,
help=_('Delete hosts and instance_mappings that belong '
'to the cell as well.'))
@args('--cell_uuid', metavar='<cell_uuid>', dest='cell_uuid',
required=True, help=_('The uuid of the cell to delete.'))
def delete_cell(self, cell_uuid, force=False):
"""Delete an empty cell by the given uuid.
This command will return a non-zero exit code in the following cases.
* The cell is not found by uuid.
* It has hosts and force is False.
* It has instance mappings and force is False.
If force is True and the cell has hosts and/or instance_mappings, they
are deleted as well (as long as there are no living instances).
Returns 0 in the following cases.
* The empty cell is found and deleted successfully.
* The cell has hosts and force is True then the cell, hosts and
instance_mappings are deleted successfully; if there are no
living instances.
"""
ctxt = context.get_admin_context()
# Find the CellMapping given the uuid.
try:
cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
except exception.CellMappingNotFound:
print(_('Cell with uuid %s was not found.') % cell_uuid)
return 1
# Check to see if there are any HostMappings for this cell.
host_mappings = objects.HostMappingList.get_by_cell_id(
ctxt, cell_mapping.id)
nodes = []
if host_mappings:
if not force:
print(_('There are existing hosts mapped to cell with uuid '
'%s.') % cell_uuid)
return 2
# We query for the compute nodes in the cell,
# so that they can be unmapped.
with context.target_cell(ctxt, cell_mapping) as cctxt:
nodes = objects.ComputeNodeList.get_all(cctxt)
# Check to see if there are any InstanceMappings for this cell.
instance_mappings = objects.InstanceMappingList.get_by_cell_id(
ctxt, cell_mapping.id)
if instance_mappings:
with context.target_cell(ctxt, cell_mapping) as cctxt:
instances = objects.InstanceList.get_all(cctxt)
if instances:
# There are instances in the cell.
print(_('There are existing instances mapped to cell with '
'uuid %s.') % cell_uuid)
return 3
else:
if not force:
# There are no instances in the cell but the records remain
# in the 'instance_mappings' table.
print(_("There are instance mappings to cell with uuid "
"%s, but all instances have been deleted "
"in the cell.") % cell_uuid)
print(_("So execute 'nova-manage db archive_deleted_rows' "
"to delete the instance mappings."))
return 4
# Delete instance_mappings of the deleted instances
for instance_mapping in instance_mappings:
instance_mapping.destroy()
# Unmap the compute nodes so that they can be discovered
# again in future, if needed.
for node in nodes:
node.mapped = 0
node.save()
# Delete hosts mapped to the cell.
for host_mapping in host_mappings:
host_mapping.destroy()
# There are no hosts or instances mapped to the cell so delete it.
cell_mapping.destroy()
return 0
@args('--cell_uuid', metavar='<cell_uuid>', dest='cell_uuid',
required=True, help=_('The uuid of the cell to update.'))
@args('--name', metavar='<cell_name>', dest='name',
help=_('Set the cell name.'))
@args('--transport-url', metavar='<transport_url>', dest='transport_url',
help=_('Set the cell transport_url. NOTE that running nodes '
'will not see the change until restart!'))
@args('--database_connection', metavar='<database_connection>',
dest='db_connection',
help=_('Set the cell database_connection. NOTE that running nodes '
'will not see the change until restart!'))
@args('--disable', action='store_true', dest='disable',
help=_('Disables the cell. Note that the scheduling will be blocked '
'to this cell until its enabled and followed by a SIGHUP of '
'nova-scheduler service.'))
@args('--enable', action='store_true', dest='enable',
help=_('Enables the cell. Note that this makes a disabled cell '
'available for scheduling after a SIGHUP of the '
'nova-scheduler service'))
def update_cell(self, cell_uuid, name=None, transport_url=None,
db_connection=None, disable=False, enable=False):
"""Updates the properties of a cell by the given uuid.
If the cell is not found by uuid, this command will return an exit
code of 1. If the provided transport_url or/and database_connection
is/are same as another cell, this command will return an exit code
of 3. If the properties cannot be set, this will return 2. If an
attempt is made to disable and enable a cell at the same time, this
command will exit with a return code of 4. If an attempt is made to
disable or enable cell0 this command will exit with a return code of 5.
Otherwise, the exit code will be 0.
NOTE: Updating the transport_url or database_connection fields on
a running system will NOT result in all nodes immediately using the
new values. Use caution when changing these values.
NOTE (tssurya): The scheduler will not notice that a cell has been
enabled/disabled until it is restarted or sent the SIGHUP signal.
"""
ctxt = context.get_admin_context()
try:
cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
except exception.CellMappingNotFound:
print(_('Cell with uuid %s was not found.') % cell_uuid)
return 1
if name:
cell_mapping.name = name
# Having empty transport_url and db_connection means leaving the
# existing values
transport_url = self._validate_transport_url(
transport_url, warn_about_none=False)
db_connection = self._validate_database_connection(
db_connection, warn_about_none=False)
if (self._non_unique_transport_url_database_connection_checker(ctxt,
cell_mapping, transport_url, db_connection)):
# We use the return code 3 before 2 to avoid changing the
# semantic meanings of return codes.
return 3
if transport_url:
cell_mapping.transport_url = transport_url
if db_connection:
cell_mapping.database_connection = db_connection
if disable and enable:
print(_('Cell cannot be disabled and enabled at the same time.'))
return 4
if disable or enable:
if cell_mapping.is_cell0():
print(_('Cell0 cannot be disabled.'))
return 5
elif disable and not cell_mapping.disabled:
cell_mapping.disabled = True
elif enable and cell_mapping.disabled:
cell_mapping.disabled = False
elif disable and cell_mapping.disabled:
print(_('Cell %s is already disabled') % cell_uuid)
elif enable and not cell_mapping.disabled:
print(_('Cell %s is already enabled') % cell_uuid)
try:
cell_mapping.save()
except Exception as e:
print(_('Unable to update CellMapping: %s') % e)
return 2
return 0
@args('--cell_uuid', metavar='<cell_uuid>', dest='cell_uuid',
help=_('The uuid of the cell.'))
def list_hosts(self, cell_uuid=None):
"""Lists the hosts in one or all v2 cells."""
ctxt = context.get_admin_context()
if cell_uuid:
# Find the CellMapping given the uuid.
try:
cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
except exception.CellMappingNotFound:
print(_('Cell with uuid %s was not found.') % cell_uuid)
return 1
host_mappings = objects.HostMappingList.get_by_cell_id(
ctxt, cell_mapping.id)
else:
host_mappings = objects.HostMappingList.get_all(ctxt)
field_names = [_('Cell Name'), _('Cell UUID'), _('Hostname')]
t = prettytable.PrettyTable(field_names)
for host in sorted(host_mappings, key=lambda _host: _host.host):
fields = [host.cell_mapping.name, host.cell_mapping.uuid,
host.host]
t.add_row(fields)
print(t)
return 0
@args('--cell_uuid', metavar='<cell_uuid>', dest='cell_uuid',
required=True, help=_('The uuid of the cell.'))
@args('--host', metavar='<host>', dest='host',
required=True, help=_('The host to delete.'))
def delete_host(self, cell_uuid, host):
"""Delete a host in a cell (host mappings) by the given host name
This command will return a non-zero exit code in the following cases.
* The cell is not found by uuid.
* The host is not found by host name.
* The host is not in the cell.
* The host has instances.
Returns 0 if the host is deleted successfully.
NOTE: The scheduler caches host-to-cell mapping information so when
deleting a host the scheduler may need to be restarted or sent the
SIGHUP signal.
"""
ctxt = context.get_admin_context()
# Find the CellMapping given the uuid.
try:
cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
except exception.CellMappingNotFound:
print(_('Cell with uuid %s was not found.') % cell_uuid)
return 1
try:
host_mapping = objects.HostMapping.get_by_host(ctxt, host)
except exception.HostMappingNotFound:
print(_('The host %s was not found.') % host)
return 2
if host_mapping.cell_mapping.uuid != cell_mapping.uuid:
print(_('The host %(host)s was not found '
'in the cell %(cell_uuid)s.') % {'host': host,
'cell_uuid': cell_uuid})
return 3
with context.target_cell(ctxt, cell_mapping) as cctxt:
instances = objects.InstanceList.get_by_host(cctxt, host)
try:
nodes = objects.ComputeNodeList.get_all_by_host(cctxt, host)
except exception.ComputeHostNotFound:
nodes = []
if instances:
print(_('There are instances on the host %s.') % host)
return 4
for node in nodes:
node.mapped = 0
node.save()
host_mapping.destroy()
return 0
class PlacementCommands(object):
"""Commands for managing placement resources."""
@staticmethod
def _get_compute_node_uuid(ctxt, instance, node_cache):
"""Find the ComputeNode.uuid for the given Instance
:param ctxt: cell-targeted nova.context.RequestContext
:param instance: the instance to lookup a compute node
:param node_cache: dict of Instance.node keys to ComputeNode.uuid
values; this cache is updated if a new node is processed.
:returns: ComputeNode.uuid for the given instance
:raises: nova.exception.ComputeHostNotFound
"""
if instance.node in node_cache:
return node_cache[instance.node]
compute_node = objects.ComputeNode.get_by_host_and_nodename(
ctxt, instance.host, instance.node)
node_uuid = compute_node.uuid
node_cache[instance.node] = node_uuid
return node_uuid
@staticmethod
def _get_ports(ctxt, instance, neutron):
"""Return the ports that are bound to the instance
:param ctxt: nova.context.RequestContext
:param instance: the instance to return the ports for
:param neutron: nova.network.neutron.ClientWrapper to
communicate with Neutron
:return: a list of neutron port dict objects
:raise UnableToQueryPorts: If the neutron list ports query fails.
"""
try:
return neutron.list_ports(
ctxt, device_id=instance.uuid,
fields=['id', constants.RESOURCE_REQUEST,
constants.BINDING_PROFILE]
)['ports']
except neutron_client_exc.NeutronClientException as e:
raise exception.UnableToQueryPorts(
instance_uuid=instance.uuid, error=str(e))
@staticmethod
def _has_request_but_no_allocation(port, neutron):
has_res_req = neutron_api.API()._has_resource_request(
context.get_admin_context(), port, neutron)
binding_profile = neutron_api.get_binding_profile(port)
allocation = binding_profile.get(constants.ALLOCATION)
return has_res_req and not allocation
@staticmethod
def _merge_allocations(alloc1, alloc2):
"""Return a new allocation dict that contains the sum of alloc1 and
alloc2.
:param alloc1: a dict in the form of
{
<rp_uuid>: {'resources': {<resource class>: amount,
<resource class>: amount},
<rp_uuid>: {'resources': {<resource class>: amount},
}
:param alloc2: a dict in the same form as alloc1
:return: the merged allocation of alloc1 and alloc2 in the same format
"""
allocations = collections.defaultdict(
lambda: {'resources': collections.defaultdict(int)})
for alloc in [alloc1, alloc2]:
for rp_uuid in alloc:
for rc, amount in alloc[rp_uuid]['resources'].items():
allocations[rp_uuid]['resources'][rc] += amount
return allocations
@staticmethod
def _get_resource_request_from_ports(
ctxt: context.RequestContext,
ports: ty.List[ty.Dict[str, ty.Any]]
) -> ty.Tuple[
ty.Dict[str, ty.List['objects.RequestGroup']],
'objects.RequestLevelParams']:
"""Collect RequestGroups and RequestLevelParams for all ports
:param ctxt: the request context
:param ports: a list of port dicts
:returns: A two tuple where the first item is a dict mapping port
uuids to a list of request groups coming from that port, the
second item is a combined RequestLevelParams object from all ports.
"""
groups = {}
request_level_params = objects.RequestLevelParams()
extended_res_req = (
neutron_api.API().has_extended_resource_request_extension(
ctxt)
)
for port in ports:
resource_request = port.get(constants.RESOURCE_REQUEST)
if extended_res_req:
groups[port['id']] = (
objects.RequestGroup.from_extended_port_request(
ctxt, resource_request
)
)
request_level_params.extend_with(
objects.RequestLevelParams.from_port_request(
resource_request
)
)
else:
# This is the legacy format, only one group per port and no
# request level param support
# TODO(gibi): remove this path once the extended resource
# request extension is mandatory in neutron
groups[port['id']] = [
objects.RequestGroup.from_port_request(
ctxt, port['id'], resource_request
)
]
return groups, request_level_params
@staticmethod
def _get_port_binding_profile_allocation(
ctxt: context.RequestContext,
neutron: neutron_api.ClientWrapper,
port: ty.Dict[str, ty.Any],
request_groups: ty.List['objects.RequestGroup'],
resource_provider_mapping: ty.Dict[str, ty.List[str]],
) -> ty.Dict[str, str]:
"""Generate the value of the allocation key of the port binding profile
based on the provider mapping returned from placement
:param ctxt: the request context
:param neutron: the neutron client
:param port: the port dict from neutron
:param request_groups: the list of RequestGroups object generated from
the port resource request
:param resource_provider_mapping: The dict of request group to resource
provider mapping returned by the Placement allocation candidate
query
:returns: a dict mapping request group ids to resource provider uuids
in the form as Neutron expects in the port binding profile.
"""
if neutron_api.API().has_extended_resource_request_extension(
ctxt, neutron
):
# The extended resource request format also means that a
# port has more than a one request groups.
# Each request group id from the port needs to be mapped to
# a single provider id from the provider mappings. Each
# group from the port is mapped to a numbered request group
# in placement so we can assume that they are mapped to
# a single provider and therefore the provider mapping list
# has a single provider id.
allocation = {
group.requester_id: resource_provider_mapping[
group.requester_id][0]
for group in request_groups
}
else:
# This is the legacy resource request format where a port
# is mapped to a single request group
# NOTE(gibi): In the resource provider mapping there can be
# more than one RP fulfilling a request group. But resource
# requests of a Neutron port is always mapped to a
# numbered request group that is always fulfilled by one
# resource provider. So we only pass that single RP UUID
# here.
allocation = resource_provider_mapping[
port['id']][0]
return allocation
def _get_port_allocations_to_heal(
self, ctxt, instance, node_cache, placement, neutron, output):
"""Return the needed extra allocation for the ports of the instance.
:param ctxt: nova.context.RequestContext
:param instance: instance to get the port allocations for
:param node_cache: dict of Instance.node keys to ComputeNode.uuid
values; this cache is updated if a new node is processed.
:param placement: nova.scheduler.client.report.SchedulerReportClient
to communicate with the Placement service API.
:param neutron: nova.network.neutron.ClientWrapper to
communicate with Neutron
:param output: function that takes a single message for verbose output
:raise UnableToQueryPorts: If the neutron list ports query fails.
:raise nova.exception.ComputeHostNotFound: if compute node of the
instance not found in the db.
:raise PlacementAPIConnectFailure: if placement API cannot be reached
:raise AllocationUpdateFailed: if there is either no allocation
candidate returned from placement for the missing port allocations
or there are more than one candidates making the healing
ambiguous.
:return: A two tuple where the first item is a dict of resources keyed
by RP uuid to be included in the instance allocation dict. The
second item is a list of port dicts to be updated in Neutron.
"""
# We need to heal port allocations for ports that have resource_request
# but do not have an RP uuid in the binding:profile.allocation field.
# We cannot use the instance info_cache to check the binding profile
# as this code needs to be able to handle ports that were attached
# before nova in stein started updating the allocation key in the
# binding:profile.
# In theory a port can be assigned to an instance without it being
# bound to any host (e.g. in case of shelve offload) but
# _heal_allocations_for_instance() already filters out instances that
# are not on any host.
ports_to_heal = [
port for port in self._get_ports(ctxt, instance, neutron)
if self._has_request_but_no_allocation(port, neutron)]
if not ports_to_heal:
# nothing to do, return early
return {}, []
node_uuid = self._get_compute_node_uuid(
ctxt, instance, node_cache)
# NOTE(gibi): We need to handle both legacy and extended resource
# request. So we need to handle ports with multiple request groups
# allocating from multiple providers.
# The logic what we follow here is pretty similar to the logic
# implemented in ComputeManager._allocate_port_resource_for_instance
# for the interface attach case. We just apply it to more then one
# ports here.
request_groups_per_port, req_lvl_params = (
self._get_resource_request_from_ports(ctxt, ports_to_heal)
)
# flatten the list of list of groups
request_groups = [
group
for groups in request_groups_per_port.values()
for group in groups
]
# we can have multiple request groups, it would be enough to restrict
# only one of them to the compute tree but for symmetry we restrict
# all of them
for request_group in request_groups:
request_group.in_tree = node_uuid
# If there are multiple groups then the group_policy is mandatory in
# the allocation candidate query. We can assume that if this instance
# booted successfully then we have the policy in the flavor. If there
# is only one group and therefore no policy then the value of the
# policy in the allocation candidate query is ignored, so we simply
# default it here.
group_policy = instance.flavor.extra_specs.get("group_policy", "none")
rr = scheduler_utils.ResourceRequest.from_request_groups(
request_groups, req_lvl_params, group_policy)
res = placement.get_allocation_candidates(ctxt, rr)
# NOTE(gibi): the get_allocation_candidates method has the
# @safe_connect decorator applied. Such decorator will return None
# if the connection to Placement is failed. So we raise an exception
# here. The case when Placement successfully return a response, even
# if it is a negative or empty response, the method will return a three
# tuple. That case is handled couple of lines below.
if not res:
raise exception.PlacementAPIConnectFailure()
alloc_reqs, __, __ = res
if not alloc_reqs:
port_ids = [port['id'] for port in ports_to_heal]
raise exception.AllocationUpdateFailed(
consumer_uuid=instance.uuid,
error=f'Placement returned no allocation candidate to fulfill '
f'the resource request of the port(s) {port_ids}'
)
if len(alloc_reqs) > 1:
# If there is more than one candidates then it is an ambiguous
# situation that we cannot handle here because selecting the right
# one might need extra information from the compute node. For
# example which PCI PF the VF is allocated from and which RP
# represents that PCI PF in placement.
# TODO(gibi): One way to get that missing information to resolve
# ambiguity would be to load up the InstancePciRequest objects and
# try to use the parent_if_name in their spec to find the proper
# candidate that allocates for the same port from the PF RP that
# has the same name.
port_ids = [port['id'] for port in ports_to_heal]
raise exception.AllocationUpdateFailed(
consumer_uuid=instance.uuid,
error=f'Placement returned more than one possible allocation '
f'candidates to fulfill the resource request of the '
f'port(s) {port_ids}. This script does not have enough '
f'information to select the proper candidate to heal the'
f'missing allocations. A possible way to heal the'
f'allocation of this instance is to migrate it to '
f'another compute as the migration process re-creates '
f'the full allocation on the target host.'
)
# so we have one candidate, lets use that to get the needed allocations
# and the provider mapping for the ports' binding profile
alloc_req = alloc_reqs[0]
allocations = alloc_req["allocations"]
provider_mappings = alloc_req["mappings"]
for port in ports_to_heal:
# We also need to record the RPs we are allocated from in the
# port. This will be sent back to Neutron before the allocation
# is updated in placement
profile_allocation = self._get_port_binding_profile_allocation(
ctxt, neutron, port, request_groups_per_port[port['id']],
provider_mappings
)
binding_profile = neutron_api.get_binding_profile(port)
binding_profile[constants.ALLOCATION] = profile_allocation
port[constants.BINDING_PROFILE] = binding_profile
output(_(
"Found a request group : resource provider mapping "
"%(mapping)s for the port %(port_uuid)s with resource request "
"%(request)s attached to the instance %(instance_uuid)s") %
{"mapping": profile_allocation, "port_uuid": port['id'],
"request": port.get(constants.RESOURCE_REQUEST),
"instance_uuid": instance.uuid}
)
return allocations, ports_to_heal
def _update_ports(self, neutron, ports_to_update, output):
succeeded = []
try:
for port in ports_to_update:
profile = neutron_api.get_binding_profile(port)
body = {
'port': {
constants.BINDING_PROFILE: profile
}
}
output(
_('Updating port %(port_uuid)s with attributes '
'%(attributes)s') %
{'port_uuid': port['id'], 'attributes': body['port']})
neutron.update_port(port['id'], body=body)
succeeded.append(port)
except neutron_client_exc.NeutronClientException as e:
output(
_('Updating port %(port_uuid)s failed: %(error)s') %
{'port_uuid': port['id'], 'error': str(e)})
# one of the port updates failed. We need to roll back the updates
# that succeeded before
self._rollback_port_updates(neutron, succeeded, output)
# we failed to heal so we need to stop but we successfully rolled
# back the partial updates so the admin can retry the healing.
raise exception.UnableToUpdatePorts(error=str(e))
@staticmethod
def _rollback_port_updates(neutron, ports_to_rollback, output):
# _update_ports() added the allocation key to these ports, so we need
# to remove them during the rollback.
manual_rollback_needed = []
last_exc = None
for port in ports_to_rollback:
profile = neutron_api.get_binding_profile(port)
profile.pop(constants.ALLOCATION)
body = {
'port': {
constants.BINDING_PROFILE: profile
}
}
try:
output(_('Rolling back port update for %(port_uuid)s') %
{'port_uuid': port['id']})
neutron.update_port(port['id'], body=body)
except neutron_client_exc.NeutronClientException as e:
output(
_('Rolling back update for port %(port_uuid)s failed: '
'%(error)s') % {'port_uuid': port['id'],
'error': str(e)})
# TODO(gibi): We could implement a retry mechanism with
# back off.
manual_rollback_needed.append(port['id'])
last_exc = e
if manual_rollback_needed:
# At least one of the port operation failed so we failed to roll
# back. There are partial updates in neutron. Human intervention
# needed.
raise exception.UnableToRollbackPortUpdates(
error=str(last_exc),
port_uuids=manual_rollback_needed)
def _heal_missing_alloc(self, ctxt, instance, node_cache):
node_uuid = self._get_compute_node_uuid(
ctxt, instance, node_cache)
# Now get the resource allocations for the instance based
# on its embedded flavor.
resources = scheduler_utils.resources_from_flavor(
instance, instance.flavor)
payload = {
'allocations': {
node_uuid: {'resources': resources},
},
'project_id': instance.project_id,
'user_id': instance.user_id,
'consumer_generation': None
}
return payload
def _heal_missing_project_and_user_id(self, allocations, instance):
allocations['project_id'] = instance.project_id
allocations['user_id'] = instance.user_id
return allocations
@staticmethod
def ensure_instance_has_no_vgpu_request(instance):
if instance.flavor.extra_specs.get("resources:VGPU"):
raise exception.HealvGPUAllocationNotSupported(
instance_uuid=instance.uuid)
@staticmethod
def ensure_instance_has_no_cyborg_device_profile_request(instance):
if instance.flavor.extra_specs.get("accel:device_profile"):
raise exception.HealDeviceProfileAllocationNotSupported(
instance_uuid=instance.uuid)
def _heal_allocations_for_instance(self, ctxt, instance, node_cache,
output, placement, dry_run,
heal_port_allocations, neutron,
force):
"""Checks the given instance to see if it needs allocation healing
:param ctxt: cell-targeted nova.context.RequestContext
:param instance: the instance to check for allocation healing
:param node_cache: dict of Instance.node keys to ComputeNode.uuid
values; this cache is updated if a new node is processed.
:param output: function that takes a single message for verbose output
:param placement: nova.scheduler.client.report.SchedulerReportClient
to communicate with the Placement service API.
:param dry_run: Process instances and print output but do not commit
any changes.
:param heal_port_allocations: True if healing port allocation is
requested, False otherwise.
:param neutron: nova.network.neutron.ClientWrapper to
communicate with Neutron
:param force: True if force healing is requested for particular
instance, False otherwise.
:return: True if allocations were created or updated for the instance,
None if nothing needed to be done
:raises: nova.exception.ComputeHostNotFound if a compute node for a
given instance cannot be found
:raises: AllocationCreateFailed if unable to create allocations for
a given instance against a given compute node resource provider
:raises: AllocationUpdateFailed if unable to update allocations for
a given instance with consumer project/user information
:raise UnableToQueryPorts: If the neutron list ports query fails.
:raise PlacementAPIConnectFailure: if placement API cannot be reached
:raise UnableToUpdatePorts: if a port update failed in neutron but any
partial update was rolled back successfully.
:raise UnableToRollbackPortUpdates: if a port update failed in neutron
and the rollback of the partial updates also failed.
"""
if instance.task_state is not None:
output(_('Instance %(instance)s is undergoing a task '
'state transition: %(task_state)s') %
{'instance': instance.uuid,
'task_state': instance.task_state})
return
if instance.node is None:
output(_('Instance %s is not on a host.') % instance.uuid)
return
self.ensure_instance_has_no_vgpu_request(instance)
self.ensure_instance_has_no_cyborg_device_profile_request(instance)
try:
allocations = placement.get_allocs_for_consumer(
ctxt, instance.uuid)
except (ks_exc.ClientException,
exception.ConsumerAllocationRetrievalFailed) as e:
raise exception.AllocationUpdateFailed(
consumer_uuid=instance.uuid,
error=_("Allocation retrieval failed: %s") % e)
need_healing = False
# Placement response can have an empty {'allocations': {}} in it if
# there are no allocations for the instance
if not allocations.get('allocations'):
# This instance doesn't have allocations
need_healing = _CREATE
allocations = self._heal_missing_alloc(ctxt, instance, node_cache)
if (allocations.get('project_id') != instance.project_id or
allocations.get('user_id') != instance.user_id):
# We have an instance with allocations but not the correct
# project_id/user_id, so we want to update the allocations
# and re-put them. We don't use put_allocations here
# because we don't want to mess up shared or nested
# provider allocations.
need_healing = _UPDATE
allocations = self._heal_missing_project_and_user_id(
allocations, instance)
if force:
output(_('Force flag passed for instance %s') % instance.uuid)
need_healing = _UPDATE
# get default allocations
alloc = self._heal_missing_alloc(ctxt, instance, node_cache)
# set consumer generation of existing allocations
alloc["consumer_generation"] = allocations["consumer_generation"]
# set allocations
allocations = alloc
if heal_port_allocations:
to_heal = self._get_port_allocations_to_heal(
ctxt, instance, node_cache, placement, neutron, output)
port_allocations, ports_to_update = to_heal
else:
port_allocations, ports_to_update = {}, []
if port_allocations:
need_healing = need_healing or _UPDATE
# Merge in any missing port allocations
allocations['allocations'] = self._merge_allocations(
allocations['allocations'], port_allocations)
if need_healing:
if dry_run:
# json dump the allocation dict as it contains nested default
# dicts that is pretty hard to read in the verbose output
alloc = jsonutils.dumps(allocations)
if need_healing == _CREATE:
output(_('[dry-run] Create allocations for instance '
'%(instance)s: %(allocations)s') %
{'instance': instance.uuid,
'allocations': alloc})
elif need_healing == _UPDATE:
output(_('[dry-run] Update allocations for instance '
'%(instance)s: %(allocations)s') %
{'instance': instance.uuid,
'allocations': alloc})
else:
# First update ports in neutron. If any of those operations
# fail, then roll back the successful part of it and fail the
# healing. We do this first because rolling back the port
# updates is more straight-forward than rolling back allocation
# changes.
self._update_ports(neutron, ports_to_update, output)
# Now that neutron update succeeded we can try to update
# placement. If it fails we need to rollback every neutron port
# update done before.
resp = placement.put_allocations(ctxt, instance.uuid,
allocations)
if resp:
if need_healing == _CREATE:
output(_('Successfully created allocations for '
'instance %(instance)s.') %
{'instance': instance.uuid})
elif need_healing == _UPDATE:
output(_('Successfully updated allocations for '
'instance %(instance)s.') %
{'instance': instance.uuid})
return True
else:
# Rollback every neutron update. If we succeed to
# roll back then it is safe to stop here and let the admin
# retry. If the rollback fails then
# _rollback_port_updates() will raise another exception
# that instructs the operator how to clean up manually
# before the healing can be retried
self._rollback_port_updates(
neutron, ports_to_update, output)
raise exception.AllocationUpdateFailed(
consumer_uuid=instance.uuid, error='')
else:
output(_('The allocation of instance %s is up-to-date. '
'Nothing to be healed.') % instance.uuid)
return
def _heal_instances_in_cell(self, ctxt, max_count, unlimited, output,
placement, dry_run, instance_uuid,
heal_port_allocations, neutron,
force):
"""Checks for instances to heal in a given cell.
:param ctxt: cell-targeted nova.context.RequestContext
:param max_count: batch size (limit per instance query)
:param unlimited: True if all instances in the cell should be
processed, else False to just process $max_count instances
:param output: function that takes a single message for verbose output
:param placement: nova.scheduler.client.report.SchedulerReportClient
to communicate with the Placement service API.
:param dry_run: Process instances and print output but do not commit
any changes.
:param instance_uuid: UUID of a specific instance to process.
:param heal_port_allocations: True if healing port allocation is
requested, False otherwise.
:param neutron: nova.network.neutron.ClientWrapper to
communicate with Neutron
:param force: True if force healing is requested for particular
instance, False otherwise.
:return: Number of instances that had allocations created.
:raises: nova.exception.ComputeHostNotFound if a compute node for a
given instance cannot be found
:raises: AllocationCreateFailed if unable to create allocations for
a given instance against a given compute node resource provider
:raises: AllocationUpdateFailed if unable to update allocations for
a given instance with consumer project/user information
:raise UnableToQueryPorts: If the neutron list ports query fails.
:raise PlacementAPIConnectFailure: if placement API cannot be reached
:raise UnableToUpdatePorts: if a port update failed in neutron but any
partial update was rolled back successfully.
:raise UnableToRollbackPortUpdates: if a port update failed in neutron
and the rollback of the partial updates also failed.
"""
# Keep a cache of instance.node to compute node resource provider UUID.
# This will save some queries for non-ironic instances to the
# compute_nodes table.
node_cache = {}
# Track the total number of instances that have allocations created
# for them in this cell. We return when num_processed equals max_count
# and unlimited=True or we exhaust the number of instances to process
# in this cell.
num_processed = 0
# Get all instances from this cell which have a host and are not
# undergoing a task state transition. Go from oldest to newest.
# NOTE(mriedem): Unfortunately we don't have a marker to use
# between runs where the user is specifying --max-count.
# TODO(mriedem): Store a marker in system_metadata so we can
# automatically pick up where we left off without the user having
# to pass it in (if unlimited is False).
filters = {'deleted': False}
if instance_uuid:
filters['uuid'] = instance_uuid
instances = objects.InstanceList.get_by_filters(
ctxt, filters=filters, sort_key='created_at', sort_dir='asc',
limit=max_count, expected_attrs=['flavor'])
while instances:
output(_('Found %s candidate instances.') % len(instances))
# For each instance in this list, we need to see if it has
# allocations in placement and if so, assume it's correct and
# continue.
for instance in instances:
if self._heal_allocations_for_instance(
ctxt, instance, node_cache, output, placement,
dry_run, heal_port_allocations, neutron, force):
num_processed += 1
# Make sure we don't go over the max count. Note that we
# don't include instances that already have allocations in the
# max_count number, only the number of instances that have
# successfully created allocations.
# If a specific instance was requested we return here as well.
if (not unlimited and num_processed == max_count) or instance_uuid:
return num_processed
# Use a marker to get the next page of instances in this cell.
# Note that InstanceList doesn't support slice notation.
marker = instances[len(instances) - 1].uuid
instances = objects.InstanceList.get_by_filters(
ctxt, filters=filters, sort_key='created_at', sort_dir='asc',
limit=max_count, marker=marker, expected_attrs=['flavor'])
return num_processed
@action_description(
_("Iterates over non-cell0 cells looking for instances which do "
"not have allocations in the Placement service, or have incomplete "
"consumer project_id/user_id values in existing allocations or "
"missing allocations for ports having resource request, and "
"which are not undergoing a task state transition. For each "
"instance found, allocations are created (or updated) against the "
"compute node resource provider for that instance based on the "
"flavor associated with the instance. This command requires that "
"the [api_database]/connection and [placement] configuration "
"options are set."))
@args('--max-count', metavar='<max_count>', dest='max_count',
help='Maximum number of instances to process. If not specified, all '
'instances in each cell will be mapped in batches of 50. '
'If you have a large number of instances, consider specifying '
'a custom value and run the command until it exits with '
'0 or 4.')
@args('--verbose', action='store_true', dest='verbose', default=False,
help='Provide verbose output during execution.')
@args('--dry-run', action='store_true', dest='dry_run', default=False,
help='Runs the command and prints output but does not commit any '
'changes. The return code should be 4.')
@args('--instance', metavar='<instance_uuid>', dest='instance_uuid',
help='UUID of a specific instance to process. If specified '
'--max-count has no effect. '
'The --cell and --instance options are mutually exclusive.')
@args('--skip-port-allocations', action='store_true',
dest='skip_port_allocations', default=False,
help='Skip the healing of the resource allocations of bound ports. '
'E.g. healing bandwidth resource allocation for ports having '
'minimum QoS policy rules attached. If your deployment does '
'not use such a feature then the performance impact of '
'querying neutron ports for each instance can be avoided with '
'this flag.')
@args('--cell', metavar='<cell_uuid>', dest='cell_uuid',
help='Heal allocations within a specific cell. '
'The --cell and --instance options are mutually exclusive.')
@args('--force', action='store_true', dest='force', default=False,
help='Force heal allocations. Requires the --instance argument.')
def heal_allocations(self, max_count=None, verbose=False, dry_run=False,
instance_uuid=None, skip_port_allocations=False,
cell_uuid=None, force=False):
"""Heals instance allocations in the Placement service
Return codes:
* 0: Command completed successfully and allocations were created.
* 1: --max-count was reached and there are more instances to process.
* 2: Unable to find a compute node record for a given instance.
* 3: Unable to create (or update) allocations for an instance against
its compute node resource provider.
* 4: Command completed successfully but no allocations were created.
* 5: Unable to query ports from neutron
* 6: Unable to update ports in neutron
* 7: Cannot roll back neutron port updates. Manual steps needed.
* 8: Cannot heal instance with vGPU or Cyborg resource request
* 127: Invalid input.
"""
# NOTE(mriedem): Thoughts on ways to expand this:
# - allow filtering on enabled/disabled cells
# - add a force option to force allocations for instances which have
# task_state is not None (would get complicated during a migration);
# for example, this could cleanup ironic instances that have
# allocations on VCPU/MEMORY_MB/DISK_GB but are now using a custom
# resource class
# - deal with nested resource providers?
heal_port_allocations = not skip_port_allocations
output = lambda msg: None
if verbose:
output = lambda msg: print(msg)
# If user has provided both cell and instance
# Throw an error
if instance_uuid and cell_uuid:
print(_('The --cell and --instance options '
'are mutually exclusive.'))
return 127
if force and not instance_uuid:
print(_('The --instance flag is required '
'when using --force flag.'))
return 127
# TODO(mriedem): Rather than --max-count being both a total and batch
# count, should we have separate options to be specific, i.e. --total
# and --batch-size? Then --batch-size defaults to 50 and --total
# defaults to None to mean unlimited.
if instance_uuid:
max_count = 1
unlimited = False
elif max_count is not None:
try:
max_count = int(max_count)
except ValueError:
max_count = -1
unlimited = False
if max_count < 1:
print(_('Must supply a positive integer for --max-count.'))
return 127
else:
max_count = 50
unlimited = True
output(_('Running batches of %i until complete') % max_count)
ctxt = context.get_admin_context()
# If we are going to process a specific instance, just get the cell
# it is in up front.
if instance_uuid:
try:
im = objects.InstanceMapping.get_by_instance_uuid(
ctxt, instance_uuid)
cells = objects.CellMappingList(objects=[im.cell_mapping])
except exception.InstanceMappingNotFound:
print('Unable to find cell for instance %s, is it mapped? Try '
'running "nova-manage cell_v2 verify_instance" or '
'"nova-manage cell_v2 map_instances".' %
instance_uuid)
return 127
elif cell_uuid:
try:
# validate cell_uuid
cell = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
# create CellMappingList
cells = objects.CellMappingList(objects=[cell])
except exception.CellMappingNotFound:
print(_('Cell with uuid %s was not found.') % cell_uuid)
return 127
else:
cells = objects.CellMappingList.get_all(ctxt)
if not cells:
output(_('No cells to process.'))
return 4
placement = report.SchedulerReportClient()
neutron = None
if heal_port_allocations:
neutron = neutron_api.get_client(ctxt, admin=True)
num_processed = 0
# TODO(mriedem): Use context.scatter_gather_skip_cell0.
for cell in cells:
# Skip cell0 since that is where instances go that do not get
# scheduled and hence would not have allocations against a host.
if cell.uuid == objects.CellMapping.CELL0_UUID:
continue
output(_('Looking for instances in cell: %s') % cell.identity)
limit_per_cell = max_count
if not unlimited:
# Adjust the limit for the next cell. For example, if the user
# only wants to process a total of 100 instances and we did
# 75 in cell1, then we only need 25 more from cell2 and so on.
limit_per_cell = max_count - num_processed
with context.target_cell(ctxt, cell) as cctxt:
try:
num_processed += self._heal_instances_in_cell(
cctxt, limit_per_cell, unlimited, output, placement,
dry_run, instance_uuid, heal_port_allocations, neutron,
force)
except exception.ComputeHostNotFound as e:
print(e.format_message())
return 2
except (
exception.AllocationCreateFailed,
exception.AllocationUpdateFailed,
exception.PlacementAPIConnectFailure
) as e:
print(e.format_message())
return 3
except exception.UnableToQueryPorts as e:
print(e.format_message())
return 5
except exception.UnableToUpdatePorts as e:
print(e.format_message())
return 6
except exception.UnableToRollbackPortUpdates as e:
print(e.format_message())
return 7
except (
exception.HealvGPUAllocationNotSupported,
exception.HealDeviceProfileAllocationNotSupported,
) as e:
print(e.format_message())
return 8
# Make sure we don't go over the max count. Note that we
# don't include instances that already have allocations in the
# max_count number, only the number of instances that have
# successfully created allocations.
# If a specific instance was provided then we'll just exit
# the loop and process it below (either return 4 or 0).
if num_processed == max_count and not instance_uuid:
output(_('Max count reached. Processed %s instances.')
% num_processed)
return 1
output(_('Processed %s instances.') % num_processed)
if not num_processed:
return 4
return 0
@staticmethod
def _get_rp_uuid_for_host(ctxt, host):
"""Finds the resource provider (compute node) UUID for the given host.
:param ctxt: cell-targeted nova RequestContext
:param host: name of the compute host
:returns: The UUID of the resource provider (compute node) for the host
:raises: nova.exception.HostMappingNotFound if no host_mappings record
is found for the host; indicates
"nova-manage cell_v2 discover_hosts" needs to be run on the cell.
:raises: nova.exception.ComputeHostNotFound if no compute_nodes record
is found in the cell database for the host; indicates the
nova-compute service on that host might need to be restarted.
:raises: nova.exception.TooManyComputesForHost if there are more than
one compute_nodes records in the cell database for the host which
is only possible (under normal circumstances) for ironic hosts but
ironic hosts are not currently supported with host aggregates so
if more than one compute node is found for the host, it is
considered an error which the operator will need to resolve
manually.
"""
# Get the host mapping to determine which cell it's in.
hm = objects.HostMapping.get_by_host(ctxt, host)
# Now get the compute node record for the host from the cell.
with context.target_cell(ctxt, hm.cell_mapping) as cctxt:
# There should really only be one, since only ironic
# hosts can have multiple nodes, and you can't have
# ironic hosts in aggregates for that reason. If we
# find more than one, it's an error.
nodes = objects.ComputeNodeList.get_all_by_host(
cctxt, host)
if len(nodes) > 1:
# This shouldn't happen, so we need to bail since we
# won't know which node to use.
raise exception.TooManyComputesForHost(
num_computes=len(nodes), host=host)
return nodes[0].uuid
@action_description(
_("Mirrors compute host aggregates to resource provider aggregates "
"in the Placement service. Requires the [api_database] and "
"[placement] sections of the nova configuration file to be "
"populated."))
@args('--verbose', action='store_true', dest='verbose', default=False,
help='Provide verbose output during execution.')
# TODO(mriedem): Add an option for the 'remove aggregate' behavior.
# We know that we want to mirror hosts aggregate membership to
# placement, but regarding removal, what if the operator or some external
# tool added the resource provider to an aggregate but there is no matching
# host aggregate, e.g. ironic nodes or shared storage provider
# relationships?
# TODO(mriedem): Probably want an option to pass a specific host instead of
# doing all of them.
def sync_aggregates(self, verbose=False):
"""Synchronizes nova host aggregates with resource provider aggregates
Adds nodes to missing provider aggregates in Placement.
NOTE: Depending on the size of your deployment and the number of
compute hosts in aggregates, this command could cause a non-negligible
amount of traffic to the placement service and therefore is
recommended to be run during maintenance windows.
Return codes:
* 0: Successful run
* 1: A host was found with more than one matching compute node record
* 2: An unexpected error occurred while working with the placement API
* 3: Failed updating provider aggregates in placement
* 4: Host mappings not found for one or more host aggregate members
* 5: Compute node records not found for one or more hosts
* 6: Resource provider not found by uuid for a given host
"""
# Start by getting all host aggregates.
ctxt = context.get_admin_context()
aggregate_api = api.AggregateAPI()
placement = aggregate_api.placement_client
aggregates = aggregate_api.get_aggregate_list(ctxt)
# Now we're going to loop over the existing compute hosts in aggregates
# and check to see if their corresponding resource provider, found via
# the host's compute node uuid, are in the same aggregate. If not, we
# add the resource provider to the aggregate in Placement.
output = lambda msg: None
if verbose:
output = lambda msg: print(msg)
output(_('Filling in missing placement aggregates'))
# Since hosts can be in more than one aggregate, keep track of the host
# to its corresponding resource provider uuid to avoid redundant
# lookups.
host_to_rp_uuid = {}
unmapped_hosts = set() # keep track of any missing host mappings
computes_not_found = set() # keep track of missing nodes
providers_not_found = {} # map of hostname to missing provider uuid
for aggregate in aggregates:
output(_('Processing aggregate: %s') % aggregate.name)
for host in aggregate.hosts:
output(_('Processing host: %s') % host)
rp_uuid = host_to_rp_uuid.get(host)
if not rp_uuid:
try:
rp_uuid = self._get_rp_uuid_for_host(ctxt, host)
host_to_rp_uuid[host] = rp_uuid
except exception.HostMappingNotFound:
# Don't fail on this now, we can dump it at the end.
unmapped_hosts.add(host)
continue
except exception.ComputeHostNotFound:
# Don't fail on this now, we can dump it at the end.
computes_not_found.add(host)
continue
except exception.TooManyComputesForHost as e:
# TODO(mriedem): Should we treat this like the other
# errors and not fail immediately but dump at the end?
print(e.format_message())
return 1
# We've got our compute node record, so now we can ensure that
# the matching resource provider, found via compute node uuid,
# is in the same aggregate in placement, found via aggregate
# uuid.
try:
placement.aggregate_add_host(ctxt, aggregate.uuid,
rp_uuid=rp_uuid)
output(_('Successfully added host (%(host)s) and '
'provider (%(provider)s) to aggregate '
'(%(aggregate)s).') %
{'host': host, 'provider': rp_uuid,
'aggregate': aggregate.uuid})
except exception.ResourceProviderNotFound:
# The resource provider wasn't found. Store this for later.
providers_not_found[host] = rp_uuid
except exception.ResourceProviderAggregateRetrievalFailed as e:
print(e.message)
return 2
except exception.NovaException as e:
# The exception message is too generic in this case
print(_('Failed updating provider aggregates for '
'host (%(host)s), provider (%(provider)s) '
'and aggregate (%(aggregate)s). Error: '
'%(error)s') %
{'host': host, 'provider': rp_uuid,
'aggregate': aggregate.uuid,
'error': e.message})
return 3
# Now do our error handling. Note that there is no real priority on
# the error code we return. We want to dump all of the issues we hit
# so the operator can fix them before re-running the command, but
# whether we return 4 or 5 or 6 doesn't matter.
return_code = 0
if unmapped_hosts:
print(_('The following hosts were found in nova host aggregates '
'but no host mappings were found in the nova API DB. Run '
'"nova-manage cell_v2 discover_hosts" and then retry. '
'Missing: %s') % ','.join(unmapped_hosts))
return_code = 4
if computes_not_found:
print(_('Unable to find matching compute_nodes record entries in '
'the cell database for the following hosts; does the '
'nova-compute service on each host need to be restarted? '
'Missing: %s') % ','.join(computes_not_found))
return_code = 5
if providers_not_found:
print(_('Unable to find matching resource provider record in '
'placement with uuid for the following hosts: %s. Try '
'restarting the nova-compute service on each host and '
'then retry.') %
','.join('(%s=%s)' % (host, providers_not_found[host])
for host in sorted(providers_not_found.keys())))
return_code = 6
return return_code
def _get_instances_and_current_migrations(self, ctxt, cn_uuid):
if self.cn_uuid_mapping.get(cn_uuid):
cell_uuid, cn_host, cn_node = self.cn_uuid_mapping[cn_uuid]
else:
# We need to find the compute node record from all cells.
results = context.scatter_gather_skip_cell0(
ctxt, objects.ComputeNode.get_by_uuid, cn_uuid)
for result_cell_uuid, result in results.items():
if not context.is_cell_failure_sentinel(result):
cn = result
cell_uuid = result_cell_uuid
break
else:
return False
cn_host, cn_node = (cn.host, cn.hypervisor_hostname)
self.cn_uuid_mapping[cn_uuid] = (cell_uuid, cn_host, cn_node)
cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
# Get all the active instances from this compute node
if self.instances_mapping.get(cn_uuid):
inst_uuids = self.instances_mapping[cn_uuid]
else:
# Get the instance list record from the cell.
with context.target_cell(ctxt, cell_mapping) as cctxt:
instances = objects.InstanceList.get_by_host_and_node(
cctxt, cn_host, cn_node, expected_attrs=[])
inst_uuids = [instance.uuid for instance in instances]
self.instances_mapping[cn_uuid] = inst_uuids
# Get all *active* migrations for this compute node
# NOTE(sbauza): Since migrations are transient, it's better to not
# cache the results as they could be stale
with context.target_cell(ctxt, cell_mapping) as cctxt:
migs = objects.MigrationList.get_in_progress_by_host_and_node(
cctxt, cn_host, cn_node)
mig_uuids = [migration.uuid for migration in migs]
return (inst_uuids, mig_uuids)
def _delete_allocations_from_consumer(self, ctxt, placement, provider,
consumer_uuid, consumer_type):
"""Deletes allocations from a resource provider with consumer UUID.
:param ctxt: nova.context.RequestContext
:param placement: nova.scheduler.client.report.SchedulerReportClient
to communicate with the Placement service API.
:param provider: Resource Provider to look at.
:param consumer_uuid: the consumer UUID having allocations.
:param consumer_type: the type of consumer,
either 'instance' or 'migration'
:returns: bool whether the allocations were deleted.
"""
# We need to be careful and only remove the allocations
# against this specific RP or we would delete the
# whole instance usage and then it would require some
# healing.
# TODO(sbauza): Remove this extra check once placement
# supports querying allocation delete on both
# consumer and resource provider parameters.
allocations = placement.get_allocs_for_consumer(
ctxt, consumer_uuid)
if len(allocations['allocations']) > 1:
# This consumer has resources spread among multiple RPs (think
# nested or shared for example)
# We then need to just update the usage to remove
# the orphaned resources on the specific RP
del allocations['allocations'][provider['uuid']]
try:
placement.put_allocations(
ctxt, consumer_uuid, allocations)
except exception.AllocationUpdateFailed:
return False
else:
try:
placement.delete_allocation_for_instance(
ctxt, consumer_uuid, consumer_type, force=True)
except exception.AllocationDeleteFailed:
return False
return True
def _check_orphaned_allocations_for_provider(self, ctxt, placement,
output, provider,
delete):
"""Finds orphaned allocations for a specific resource provider.
:param ctxt: nova.context.RequestContext
:param placement: nova.scheduler.client.report.SchedulerReportClient
to communicate with the Placement service API.
:param output: function that takes a single message for verbose output
:param provider: Resource Provider to look at.
:param delete: deletes the found orphaned allocations.
:return: a tuple (<number of orphaned allocs>, <number of faults>)
"""
num_processed = 0
faults = 0
# TODO(sbauza): Are we sure we have all Nova RCs ?
# FIXME(sbauza): Possibly use consumer types once Placement API
# supports them.
# NOTE(sbauza): We check allocations having *any* below RC, not having
# *all* of them.
NOVA_RCS = [orc.VCPU, orc.MEMORY_MB, orc.DISK_GB, orc.VGPU,
orc.NET_BW_EGR_KILOBIT_PER_SEC,
orc.NET_BW_IGR_KILOBIT_PER_SEC,
orc.PCPU, orc.MEM_ENCRYPTION_CONTEXT]
# Since the RP can be a child RP, we need to get the root RP as it's
# the compute node UUID
# NOTE(sbauza): In case Placement doesn't support 1.14 microversion,
# that means we don't have nested RPs.
# Since we ask for microversion 1.14, all RPs have a root RP UUID.
cn_uuid = provider.get("root_provider_uuid")
# Now get all the existing instances and active migrations for this
# compute node
result = self._get_instances_and_current_migrations(ctxt, cn_uuid)
if result is False:
# We don't want to hard stop here because the compute service could
# have disappear while we could still have orphaned allocations.
output(_('The compute node for UUID %s can not be '
'found') % cn_uuid)
inst_uuids, mig_uuids = result or ([], [])
try:
pallocs = placement.get_allocations_for_resource_provider(
ctxt, provider['uuid'])
except exception.ResourceProviderAllocationRetrievalFailed:
print(_('Not able to find allocations for resource '
'provider %s.') % provider['uuid'])
raise
# Verify every allocations for each consumer UUID
for consumer_uuid, consumer_resources in pallocs.allocations.items():
consumer_allocs = consumer_resources['resources']
if any(rc in NOVA_RCS
for rc in consumer_allocs):
# We reset the consumer type for each allocation
consumer_type = None
# This is an allocation for Nova resources
# We need to guess whether the instance was deleted
# or if the instance is currently migrating
if not (consumer_uuid in inst_uuids or
consumer_uuid in mig_uuids):
# By default we suspect the orphaned allocation was for a
# migration...
consumer_type = 'migration'
if not(consumer_uuid in inst_uuids):
# ... but if we can't find it either for an instance,
# that means it was for this.
consumer_type = 'instance'
if consumer_type is not None:
output(_('Allocations were set against consumer UUID '
'%(consumer_uuid)s but no existing instances or '
'active migrations are related. ')
% {'consumer_uuid': consumer_uuid})
if delete:
deleted = self._delete_allocations_from_consumer(
ctxt, placement, provider, consumer_uuid,
consumer_type)
if not deleted:
print(_('Not able to delete allocations '
'for consumer UUID %s')
% consumer_uuid)
faults += 1
continue
output(_('Deleted allocations for consumer UUID '
'%(consumer_uuid)s on Resource Provider '
'%(rp)s: %(allocations)s')
% {'consumer_uuid': consumer_uuid,
'rp': provider['uuid'],
'allocations': consumer_allocs})
else:
output(_('Allocations for consumer UUID '
'%(consumer_uuid)s on Resource Provider '
'%(rp)s can be deleted: '
'%(allocations)s')
% {'consumer_uuid': consumer_uuid,
'rp': provider['uuid'],
'allocations': consumer_allocs})
num_processed += 1
return (num_processed, faults)
# TODO(sbauza): Move this to the scheduler report client ?
def _get_resource_provider(self, context, placement, uuid):
"""Returns a single Resource Provider by its UUID.
:param context: The nova.context.RequestContext auth context
:param placement: nova.scheduler.client.report.SchedulerReportClient
to communicate with the Placement service API.
:param uuid: A specific Resource Provider UUID
:return: the existing resource provider.
:raises: keystoneauth1.exceptions.base.ClientException on failure to
communicate with the placement API
"""
resource_providers = self._get_resource_providers(context, placement,
uuid=uuid)
if not resource_providers:
# The endpoint never returns a 404, it rather returns an empty list
raise exception.ResourceProviderNotFound(name_or_uuid=uuid)
return resource_providers[0]
def _get_resource_providers(self, context, placement, **kwargs):
"""Returns all resource providers regardless of their relationships.
:param context: The nova.context.RequestContext auth context
:param placement: nova.scheduler.client.report.SchedulerReportClient
to communicate with the Placement service API.
:param kwargs: extra attributes for the query string
:return: list of resource providers.
:raises: keystoneauth1.exceptions.base.ClientException on failure to
communicate with the placement API
"""
url = '/resource_providers'
if 'uuid' in kwargs:
url += '?uuid=%s' % kwargs['uuid']
resp = placement.get(url, global_request_id=context.global_id,
version='1.14')
if resp is None:
raise exception.PlacementAPIConnectFailure()
data = resp.json()
resource_providers = data.get('resource_providers')
return resource_providers
@action_description(
_("Audits orphaned allocations that are no longer corresponding to "
"existing instance resources. This command requires that "
"the [api_database]/connection and [placement] configuration "
"options are set."))
@args('--verbose', action='store_true', dest='verbose', default=False,
help='Provide verbose output during execution.')
@args('--resource_provider', metavar='<provider_uuid>',
dest='provider_uuid',
help='UUID of a specific resource provider to verify.')
@args('--delete', action='store_true', dest='delete', default=False,
help='Deletes orphaned allocations that were found.')
def audit(self, verbose=False, provider_uuid=None, delete=False):
"""Provides information about orphaned allocations that can be removed
Return codes:
* 0: Command completed successfully and no orphaned allocations exist.
* 1: An unexpected error happened during run.
* 3: Orphaned allocations were detected.
* 4: Orphaned allocations were detected and deleted.
* 127: Invalid input.
"""
ctxt = context.get_admin_context()
output = lambda msg: None
if verbose:
output = lambda msg: print(msg)
placement = report.SchedulerReportClient()
# Resets two in-memory dicts for knowing instances per compute node
self.cn_uuid_mapping = collections.defaultdict(tuple)
self.instances_mapping = collections.defaultdict(list)
num_processed = 0
faults = 0
if provider_uuid:
try:
resource_provider = self._get_resource_provider(
ctxt, placement, provider_uuid)
except exception.ResourceProviderNotFound:
print(_('Resource provider with UUID %s does not exist.') %
provider_uuid)
return 127
resource_providers = [resource_provider]
else:
resource_providers = self._get_resource_providers(ctxt, placement)
for provider in resource_providers:
nb_p, faults = self._check_orphaned_allocations_for_provider(
ctxt, placement, output, provider, delete)
num_processed += nb_p
if faults > 0:
print(_('The Resource Provider %s had problems when '
'deleting allocations. Stopping now. Please fix the '
'problem by hand and run again.') %
provider['uuid'])
return 1
if num_processed > 0:
suffix = 's.' if num_processed > 1 else '.'
output(_('Processed %(num)s allocation%(suffix)s')
% {'num': num_processed,
'suffix': suffix})
return 4 if delete else 3
return 0
class LibvirtCommands(object):
"""Commands for managing libvirt instances"""
@action_description(
_("Fetch the stored machine type of the instance from the database."))
@args('instance_uuid', metavar='<instance_uuid>',
help='UUID of instance to fetch the machine type for')
def get_machine_type(self, instance_uuid=None):
"""Fetch the stored machine type of the instance from the database.
Return codes:
* 0: Command completed successfully.
* 1: An unexpected error happened.
* 2: Unable to find instance or instance mapping.
* 3: No machine type found for the instance.
"""
try:
ctxt = context.get_admin_context()
mtype = machine_type_utils.get_machine_type(ctxt, instance_uuid)
if mtype:
print(mtype)
return 0
else:
print(_('No machine type registered for instance %s' %
instance_uuid))
return 3
except (exception.InstanceNotFound,
exception.InstanceMappingNotFound) as e:
print(str(e))
return 2
except Exception as e:
print('Unexpected error, see nova-manage.log for the full '
'trace: %s ' % str(e))
LOG.exception('Unexpected error')
return 1
@action_description(
_("Set or update the stored machine type of the instance in the "
"database. This is only allowed for instances with a STOPPED, "
"SHELVED or SHELVED_OFFLOADED vm_state."))
@args('instance_uuid', metavar='<instance_uuid>',
help='UUID of instance to update')
@args('machine_type', metavar='<machine_type>',
help='Machine type to set')
@args('--force', action='store_true', default=False, dest='force',
help='Force the update of the stored machine type')
def update_machine_type(
self,
instance_uuid=None,
machine_type=None,
force=False
):
"""Set or update the machine type of a given instance.
Return codes:
* 0: Command completed successfully.
* 1: An unexpected error happened.
* 2: Unable to find the instance or instance cell mapping.
* 3: Invalid instance vm_state.
* 4: Unable to move between underlying machine types (pc to q35 etc)
or to older versions.
* 5: Unsupported machine type.
"""
ctxt = context.get_admin_context()
if force:
print(_("Forcing update of machine type."))
try:
rtype, ptype = machine_type_utils.update_machine_type(
ctxt, instance_uuid, machine_type, force=force)
except exception.UnsupportedMachineType as e:
print(str(e))
return 5
except exception.InvalidMachineTypeUpdate as e:
print(str(e))
return 4
except exception.InstanceInvalidState as e:
print(str(e))
return 3
except (
exception.InstanceNotFound,
exception.InstanceMappingNotFound,
) as e:
print(str(e))
return 2
except Exception as e:
print('Unexpected error, see nova-manage.log for the full '
'trace: %s ' % str(e))
LOG.exception('Unexpected error')
return 1
print(_("Updated instance %(instance_uuid)s machine type to "
"%(machine_type)s (previously %(previous_type)s)") %
{'instance_uuid': instance_uuid,
'machine_type': rtype,
'previous_type': ptype})
return 0
@action_description(
_("List the UUIDs of instances that do not have hw_machine_type set "
"in their image metadata"))
@args('--cell-uuid', metavar='<cell_uuid>', dest='cell_uuid',
required=False, help='UUID of cell from which to list instances')
def list_unset_machine_type(self, cell_uuid=None):
"""List the UUIDs of instances without image_hw_machine_type set
Return codes:
* 0: Command completed successfully, no instances found.
* 1: An unexpected error happened.
* 2: Unable to find cell mapping.
* 3: Instances found without hw_machine_type set.
"""
try:
instance_list = machine_type_utils.get_instances_without_type(
context.get_admin_context(), cell_uuid)
except exception.CellMappingNotFound as e:
print(str(e))
return 2
except Exception as e:
print('Unexpected error, see nova-manage.log for the full '
'trace: %s ' % str(e))
LOG.exception('Unexpected error')
return 1
if instance_list:
print('\n'.join(i.uuid for i in instance_list))
return 3
else:
print(_("No instances found without hw_machine_type set."))
return 0
class VolumeAttachmentCommands(object):
@action_description(_("Show the details of a given volume attachment."))
@args(
'instance_uuid', metavar='<instance_uuid>',
help='UUID of the instance')
@args(
'volume_id', metavar='<volume_id>',
help='UUID of the volume')
@args(
'--connection_info', action='store_true',
default=False, dest='connection_info', required=False,
help='Only display the connection_info of the volume attachment.')
@args(
'--json', action='store_true',
default=False, dest='json', required=False,
help='Display output as json without a table.')
def show(
self,
instance_uuid=None,
volume_id=None,
connection_info=False,
json=False
):
"""Show attributes of a given volume attachment.
Return codes:
* 0: Command completed successfully.
* 1: An unexpected error happened.
* 2: Instance not found.
* 3: Volume is not attached to instance.
"""
try:
ctxt = context.get_admin_context()
im = objects.InstanceMapping.get_by_instance_uuid(
ctxt, instance_uuid)
with context.target_cell(ctxt, im.cell_mapping) as cctxt:
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
cctxt, volume_id, instance_uuid)
if connection_info and json:
print(bdm.connection_info)
elif connection_info:
print(format_dict(jsonutils.loads(bdm.connection_info)))
elif json:
print(jsonutils.dumps(bdm))
else:
print(format_dict(bdm))
return 0
except exception.VolumeBDMNotFound as e:
print(str(e))
return 3
except (
exception.InstanceNotFound,
exception.InstanceMappingNotFound,
) as e:
print(str(e))
return 2
except Exception as e:
print('Unexpected error, see nova-manage.log for the full '
'trace: %s ' % str(e))
LOG.exception('Unexpected error')
return 1
@action_description(_('Show the host connector for this host'))
@args(
'--json', action='store_true',
default=False, dest='json', required=False,
help='Display output as json without a table.')
def get_connector(self, json=False):
"""Show the host connector for this host.
Return codes:
* 0: Command completed successfully.
* 1: An unexpected error happened.
"""
try:
root_helper = utils.get_root_helper()
host_connector = connector.get_connector_properties(
root_helper, CONF.my_block_storage_ip,
CONF.libvirt.volume_use_multipath,
enforce_multipath=True,
host=CONF.host)
if json:
print(jsonutils.dumps(host_connector))
else:
print(format_dict(host_connector))
return 0
except Exception as e:
print('Unexpected error, see nova-manage.log for the full '
'trace: %s ' % str(e))
LOG.exception('Unexpected error')
return 1
def _refresh(self, instance_uuid, volume_id, connector):
"""Refresh the bdm.connection_info associated with a volume attachment
Unlike the current driver BDM implementation under
nova.virt.block_device.DriverVolumeBlockDevice.refresh_connection_info
that simply GETs an existing volume attachment from cinder this method
cleans up any existing volume connections from the host before creating
a fresh attachment in cinder and populates the underlying BDM with
connection_info from the new attachment.
We can do that here as the command requires that the instance is
stopped, something that isn't always the case with the current driver
BDM approach and thus the two are kept seperate for the time being.
:param instance_uuid: UUID of instance
:param volume_id: ID of volume attached to the instance
:param connector: Connector with which to create the new attachment
"""
volume_api = cinder.API()
compute_rpcapi = rpcapi.ComputeAPI()
compute_api = api.API()
ctxt = context.get_admin_context()
im = objects.InstanceMapping.get_by_instance_uuid(ctxt, instance_uuid)
with context.target_cell(ctxt, im.cell_mapping) as cctxt:
instance = objects.Instance.get_by_uuid(cctxt, instance_uuid)
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
cctxt, volume_id, instance_uuid)
if instance.vm_state != obj_fields.InstanceState.STOPPED:
raise exception.InstanceInvalidState(
instance_uuid=instance_uuid, attr='vm_state',
state=instance.vm_state,
method='refresh connection_info (must be stopped)')
if instance.locked:
raise exception.InstanceInvalidState(
instance_uuid=instance_uuid, attr='locked', state='True',
method='refresh connection_info (must be unlocked)')
compute_api.lock(
cctxt, instance,
reason=(
f'Refreshing connection_info for BDM {bdm.uuid} '
f'associated with instance {instance_uuid} and volume '
f'{volume_id}.'))
# NOTE(lyarwood): Yes this is weird but we need to recreate the admin
# context here to ensure the lock above uses a unique request-id
# versus the following refresh and eventual unlock.
ctxt = context.get_admin_context()
with context.target_cell(ctxt, im.cell_mapping) as cctxt:
instance_action = None
new_attachment_id = None
try:
# Log this as an instance action so operators and users are
# aware that this has happened.
instance_action = objects.InstanceAction.action_start(
cctxt, instance_uuid,
instance_actions.NOVA_MANAGE_REFRESH_VOLUME_ATTACHMENT)
# Create a blank attachment to keep the volume reserved
new_attachment_id = volume_api.attachment_create(
cctxt, volume_id, instance_uuid)['id']
# RPC call to the compute to cleanup the connections, which
# will in turn unmap the volume from the compute host
# TODO(lyarwood): Add delete_attachment as a kwarg to
# remove_volume_connection as is available in the private
# method within the manager.
compute_rpcapi.remove_volume_connection(
cctxt, instance, volume_id, instance.host)
# Delete the existing volume attachment if present in the bdm.
# This isn't present when the original attachment was made
# using the legacy cinderv2 APIs before the cinderv3 attachment
# based APIs were present.
if bdm.attachment_id:
volume_api.attachment_delete(cctxt, bdm.attachment_id)
# Update the attachment with host connector, this regenerates
# the connection_info that we can now stash in the bdm.
new_connection_info = volume_api.attachment_update(
cctxt, new_attachment_id, connector,
bdm.device_name)['connection_info']
# Before we save it to the BDM ensure the serial is stashed as
# is done in various other codepaths when attaching volumes.
if 'serial' not in new_connection_info:
new_connection_info['serial'] = bdm.volume_id
# Save the new attachment id and connection_info to the DB
bdm.attachment_id = new_attachment_id
bdm.connection_info = jsonutils.dumps(new_connection_info)
bdm.save()
# Finally mark the attachment as complete, moving the volume
# status from attaching to in-use ahead of the instance
# restarting
volume_api.attachment_complete(cctxt, new_attachment_id)
return 0
finally:
# If the bdm.attachment_id wasn't updated make sure we clean
# up any attachments created during the run.
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
cctxt, volume_id, instance_uuid)
if (
new_attachment_id and
bdm.attachment_id != new_attachment_id
):
volume_api.attachment_delete(cctxt, new_attachment_id)
# If we failed during attachment_update the bdm.attachment_id
# has already been deleted so recreate it now to ensure the
# volume is still associated with the instance and clear the
# now stale connection_info.
try:
volume_api.attachment_get(cctxt, bdm.attachment_id)
except exception.VolumeAttachmentNotFound:
bdm.attachment_id = volume_api.attachment_create(
cctxt, volume_id, instance_uuid)['id']
bdm.connection_info = None
bdm.save()
# Finish the instance action if it was created and started
# TODO(lyarwood): While not really required we should store
# the exec and traceback in here on failure.
if instance_action:
instance_action.finish()
# NOTE(lyarwood): As above we need to unlock the instance with
# a fresh context and request-id to keep it unique. It's safe
# to assume that the instance is locked as this point as the
# earlier call to lock isn't part of this block.
with context.target_cell(
context.get_admin_context(),
im.cell_mapping
) as u_cctxt:
compute_api.unlock(u_cctxt, instance)
@action_description(
_("Refresh the connection info for a given volume attachment"))
@args(
'instance_uuid', metavar='<instance_uuid>',
help='UUID of the instance')
@args(
'volume_id', metavar='<volume_id>',
help='UUID of the volume')
@args(
'connector_path', metavar='<connector_path>',
help='Path to file containing the host connector in json format.')
def refresh(self, instance_uuid=None, volume_id=None, connector_path=None):
"""Refresh the connection_info associated with a volume attachment
Return codes:
* 0: Command completed successfully.
* 1: An unexpected error happened.
* 2: Connector path does not exist.
* 3: Failed to open connector path.
* 4: Instance does not exist.
* 5: Instance state invalid.
* 6: Volume is not attached to instance.
"""
try:
# TODO(lyarwood): Make this optional and provide a rpcapi capable
# of pulling this down from the target compute during this flow.
if not os.path.exists(connector_path):
raise exception.InvalidInput(
reason=f'Connector file not found at {connector_path}')
# Read in the json connector file
with open(connector_path, 'rb') as connector_file:
connector = jsonutils.load(connector_file)
# Refresh the volume attachment
return self._refresh(instance_uuid, volume_id, connector)
except exception.VolumeBDMNotFound as e:
print(str(e))
return 6
except exception.InstanceInvalidState as e:
print(str(e))
return 5
except (
exception.InstanceNotFound,
exception.InstanceMappingNotFound,
) as e:
print(str(e))
return 4
except (ValueError, OSError):
print(
f'Failed to open {connector_path}. Does it contain valid '
f'connector_info data?'
)
return 3
except exception.InvalidInput as e:
print(str(e))
return 2
except Exception as e:
print('Unexpected error, see nova-manage.log for the full '
'trace: %s ' % str(e))
LOG.exception('Unexpected error')
return 1
class ImagePropertyCommands():
@action_description(_("Show the value of an instance image property."))
@args(
'instance_uuid', metavar='<instance_uuid>',
help='UUID of the instance')
@args(
'property', metavar='<image_property>',
help='Image property to show')
def show(self, instance_uuid=None, image_property=None):
"""Show value of a given instance image property.
Return codes:
* 0: Command completed successfully.
* 1: An unexpected error happened.
* 2: Instance not found.
* 3: Image property not found.
"""
try:
ctxt = context.get_admin_context()
im = objects.InstanceMapping.get_by_instance_uuid(
ctxt, instance_uuid)
with context.target_cell(ctxt, im.cell_mapping) as cctxt:
instance = objects.Instance.get_by_uuid(
cctxt, instance_uuid, expected_attrs=['system_metadata'])
image_property = instance.system_metadata.get(
f'image_{image_property}')
if image_property:
print(image_property)
return 0
else:
print(f'Image property {image_property} not found '
f'for instance {instance_uuid}.')
return 3
except (
exception.InstanceNotFound,
exception.InstanceMappingNotFound,
) as e:
print(str(e))
return 2
except Exception as e:
print(f'Unexpected error, see nova-manage.log for the full '
f'trace: {str(e)}')
LOG.exception('Unexpected error')
return 1
def _validate_image_properties(self, image_properties):
"""Validate the provided image property names and values
:param image_properties: List of image property names and values
"""
# Sanity check the format of the provided properties, this should be
# in the format of name=value.
if any(x for x in image_properties if '=' not in x):
raise exception.InvalidInput(
"--property should use the format key=value")
# Transform the list of delimited properties to a dict
image_properties = dict(prop.split('=') for prop in image_properties)
# Validate the names of each property by checking against the o.vo
# fields currently listed by ImageProps. We can't use from_dict to
# do this as it silently ignores invalid property keys.
for image_property_name in image_properties.keys():
if image_property_name not in objects.ImageMetaProps.fields:
raise exception.InvalidImagePropertyName(
image_property_name=image_property_name)
# Validate the values by creating an object from the provided dict.
objects.ImageMetaProps.from_dict(image_properties)
# Return the dict so we can update the instance system_metadata
return image_properties
def _update_image_properties(self, instance, image_properties):
"""Update instance image properties
:param instance: The instance to update
:param image_properties: List of image properties and values to update
"""
# Check the state of the instance
allowed_states = [
obj_fields.InstanceState.STOPPED,
obj_fields.InstanceState.SHELVED,
obj_fields.InstanceState.SHELVED_OFFLOADED,
]
if instance.vm_state not in allowed_states:
raise exception.InstanceInvalidState(
instance_uuid=instance.uuid, attr='vm_state',
state=instance.vm_state,
method='image_property set (must be STOPPED, SHELVED, OR '
'SHELVED_OFFLOADED).')
# Validate the property names and values
image_properties = self._validate_image_properties(image_properties)
# Update the image properties and save the instance record
for image_property, value in image_properties.items():
instance.system_metadata[f'image_{image_property}'] = value
# Save and return 0
instance.save()
return 0
@action_description(_(
"Set the values of instance image properties stored in the database. "
"This is only allowed for " "instances with a STOPPED, SHELVED or "
"SHELVED_OFFLOADED vm_state."))
@args(
'instance_uuid', metavar='<instance_uuid>',
help='UUID of the instance')
@args(
'--property', metavar='<image_property>', action='append',
dest='image_properties',
help='Image property to set using the format name=value. For example: '
'--property hw_disk_bus=virtio --property hw_cdrom_bus=sata')
def set(self, instance_uuid=None, image_properties=None):
"""Set instance image property values
Return codes:
* 0: Command completed successfully.
* 1: An unexpected error happened.
* 2: Unable to find instance.
* 3: Instance is in an invalid state.
* 4: Invalid input format.
* 5: Invalid image property name.
* 6: Invalid image property value.
"""
try:
ctxt = context.get_admin_context()
im = objects.InstanceMapping.get_by_instance_uuid(
ctxt, instance_uuid)
with context.target_cell(ctxt, im.cell_mapping) as cctxt:
instance = objects.Instance.get_by_uuid(
cctxt, instance_uuid, expected_attrs=['system_metadata'])
return self._update_image_properties(
instance, image_properties)
except ValueError as e:
print(str(e))
return 6
except exception.InvalidImagePropertyName as e:
print(str(e))
return 5
except exception.InvalidInput as e:
print(str(e))
return 4
except exception.InstanceInvalidState as e:
print(str(e))
return 3
except (
exception.InstanceNotFound,
exception.InstanceMappingNotFound,
) as e:
print(str(e))
return 2
except Exception as e:
print('Unexpected error, see nova-manage.log for the full '
'trace: %s ' % str(e))
LOG.exception('Unexpected error')
return 1
CATEGORIES = {
'api_db': ApiDbCommands,
'cell_v2': CellV2Commands,
'db': DbCommands, | }
add_command_parsers = functools.partial(cmd_common.add_command_parsers,
categories=CATEGORIES)
category_opt = cfg.SubCommandOpt('category',
title='Command categories',
help='Available categories',
handler=add_command_parsers)
post_mortem_opt = cfg.BoolOpt('post-mortem',
default=False,
help='Allow post-mortem debugging')
def main():
"""Parse options and call the appropriate class/method."""
CONF.register_cli_opts([category_opt, post_mortem_opt])
config.parse_args(sys.argv)
logging.set_defaults(
default_log_levels=logging.get_default_log_levels() +
_EXTRA_DEFAULT_LOG_LEVELS)
logging.setup(CONF, "nova")
objects.register_all()
if CONF.category.name == "version":
print(version.version_string_with_package())
return 0
if CONF.category.name == "bash-completion":
cmd_common.print_bash_completion(CATEGORIES)
return 0
try:
fn, fn_args, fn_kwargs = cmd_common.get_action_fn()
ret = fn(*fn_args, **fn_kwargs)
rpc.cleanup()
return ret
except Exception:
if CONF.post_mortem:
import pdb
pdb.post_mortem()
else:
print(_("An error has occurred:\n%s") % traceback.format_exc())
return 255 | 'placement': PlacementCommands,
'libvirt': LibvirtCommands,
'volume_attachment': VolumeAttachmentCommands,
'image_property': ImagePropertyCommands, |
group.py | from sys import maxsize
class Group:
def __init__(self, name=None, header=None, footer=None, id=None):
self.name = name
self.header = header
self.footer = footer
self.id = id
def __repr__(self):
return "%s:%s:%s:%s" % (self.id, self.name, self.header,self.footer)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.name == other.name
def id_or_max(self): | return int(self.id)
else:
return maxsize | if self.id: |
get_interface_endpoint.py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetInterfaceEndpointResult',
'AwaitableGetInterfaceEndpointResult',
'get_interface_endpoint',
]
@pulumi.output_type
class GetInterfaceEndpointResult:
"""
Interface endpoint resource.
"""
def __init__(__self__, endpoint_service=None, etag=None, fqdn=None, location=None, name=None, network_interfaces=None, owner=None, provisioning_state=None, subnet=None, tags=None, type=None):
if endpoint_service and not isinstance(endpoint_service, dict):
raise TypeError("Expected argument 'endpoint_service' to be a dict")
pulumi.set(__self__, "endpoint_service", endpoint_service)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if fqdn and not isinstance(fqdn, str):
raise TypeError("Expected argument 'fqdn' to be a str")
pulumi.set(__self__, "fqdn", fqdn)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_interfaces and not isinstance(network_interfaces, list):
raise TypeError("Expected argument 'network_interfaces' to be a list")
pulumi.set(__self__, "network_interfaces", network_interfaces)
if owner and not isinstance(owner, str):
raise TypeError("Expected argument 'owner' to be a str")
pulumi.set(__self__, "owner", owner)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if subnet and not isinstance(subnet, dict):
raise TypeError("Expected argument 'subnet' to be a dict")
pulumi.set(__self__, "subnet", subnet)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="endpointService")
def endpoint_service(self) -> Optional['outputs.EndpointServiceResponse']:
"""
A reference to the service being brought into the virtual network.
"""
return pulumi.get(self, "endpoint_service")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def fqdn(self) -> Optional[str]:
|
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInterfaces")
def network_interfaces(self) -> Sequence['outputs.NetworkInterfaceResponse']:
"""
Gets an array of references to the network interfaces created for this interface endpoint.
"""
return pulumi.get(self, "network_interfaces")
@property
@pulumi.getter
def owner(self) -> str:
"""
A read-only property that identifies who created this interface endpoint.
"""
return pulumi.get(self, "owner")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the interface endpoint. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def subnet(self) -> Optional['outputs.SubnetResponse']:
"""
The ID of the subnet from which the private IP will be allocated.
"""
return pulumi.get(self, "subnet")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetInterfaceEndpointResult(GetInterfaceEndpointResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetInterfaceEndpointResult(
endpoint_service=self.endpoint_service,
etag=self.etag,
fqdn=self.fqdn,
location=self.location,
name=self.name,
network_interfaces=self.network_interfaces,
owner=self.owner,
provisioning_state=self.provisioning_state,
subnet=self.subnet,
tags=self.tags,
type=self.type)
def get_interface_endpoint(expand: Optional[str] = None,
interface_endpoint_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInterfaceEndpointResult:
"""
Use this data source to access information about an existing resource.
:param str expand: Expands referenced resources.
:param str interface_endpoint_name: The name of the interface endpoint.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['interfaceEndpointName'] = interface_endpoint_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20181001:getInterfaceEndpoint', __args__, opts=opts, typ=GetInterfaceEndpointResult).value
return AwaitableGetInterfaceEndpointResult(
endpoint_service=__ret__.endpoint_service,
etag=__ret__.etag,
fqdn=__ret__.fqdn,
location=__ret__.location,
name=__ret__.name,
network_interfaces=__ret__.network_interfaces,
owner=__ret__.owner,
provisioning_state=__ret__.provisioning_state,
subnet=__ret__.subnet,
tags=__ret__.tags,
type=__ret__.type)
| """
A first-party service's FQDN that is mapped to the private IP allocated via this interface endpoint.
"""
return pulumi.get(self, "fqdn") |
wallets.module.ts | import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';
import { WalletsRoutingModule } from './wallets-routing.module';
import { WalletsComponent } from '@wallets/wallets.component';
import { TezedgeSharedModule } from '@shared/tezedge-shared.module';
import { ClipboardModule } from '@angular/cdk/clipboard';
| @NgModule({
declarations: [
WalletsComponent,
],
imports: [
CommonModule,
WalletsRoutingModule,
TezedgeSharedModule,
ClipboardModule,
]
})
export class WalletsModule {} | |
giturl.py | # Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from subprocess import check_call, CalledProcessError
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource,
filter_installed_packages,
apt_install,
)
if filter_installed_packages(['git']) != []:
apt_install(['git'])
if filter_installed_packages(['git']) != []:
raise NotImplementedError('Unable to install git')
class GitUrlFetchHandler(BaseFetchHandler):
"""Handler for git branches via generic and github URLs"""
def can_handle(self, source):
url_parts = self.parse_url(source)
# TODO (mattyw) no support for ssh git@ yet
if url_parts.scheme not in ('http', 'https', 'git', ''):
return False
elif not url_parts.scheme:
return os.path.exists(os.path.join(source, '.git'))
else:
return True |
def clone(self, source, dest, branch="master", depth=None):
if not self.can_handle(source):
raise UnhandledSource("Cannot handle {}".format(source))
if os.path.exists(dest):
cmd = ['git', '-C', dest, 'pull', source, branch]
else:
cmd = ['git', 'clone', source, dest, '--branch', branch]
if depth:
cmd.extend(['--depth', depth])
check_call(cmd)
def install(self, source, branch="master", dest=None, depth=None):
url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1]
if dest:
dest_dir = os.path.join(dest, branch_name)
else:
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
branch_name)
try:
self.clone(source, dest_dir, branch, depth)
except CalledProcessError as e:
raise UnhandledSource(e)
except OSError as e:
raise UnhandledSource(e.strerror)
return dest_dir | |
licenserule.py | from github.interfaces import Type
class LicenseRule(Type):
"""
Represents a license rule.
"""
__slots__ = ()
_repr_fields = [
"key",
]
_graphql_fields = [
"description",
"key",
"label",
]
@property
def description(self):
"""
A description of the license rule.
:type: :class:`str`
"""
return self._get_field("description")
@property
def | (self):
"""
The machine-readable key of the license rule.
:type: :class:`str`
"""
return self._get_field("key")
@property
def label(self):
"""
The human-readable label of the license rule.
:type: :class:`str`
"""
return self._get_field("label")
__all__ = [
"LicenseRule",
]
| key |
game_objects.rs | use std::rc::Rc;
| use crate::material::Material;
use crate::{Point3, dot};
pub trait Component {
fn get_name(&mut self) -> String;
}
pub trait GameObjectTrait:Hitable {
fn is_game_object(&mut self) -> bool {
true
}
fn is_in_object(&self, p: &Point3) -> bool;
fn distance(&self, p: &Point3) -> f64;
fn bind_material(&mut self, material: Rc<dyn Material>);
}
pub struct GameObject {
components: Vec<Box<dyn Component>>,
}
impl GameObject {
fn new() -> GameObject {
GameObject {
components:vec![]
}
}
}
pub struct Sphere {
center: Point3,
radius: f64,
material: Option<Rc<dyn Material>>,
}
impl Sphere {
pub fn new(center:Point3, radius:f64, material: Option<Rc<dyn Material>>) -> Sphere {
Sphere {
center:center,
radius:radius,
material,
}
}
}
impl Hitable for Sphere {
fn hit(&self, hit_record:&mut HitRecord, ray: &crate::ray::Ray, t_min: f64, t_max: f64) -> f64 {
let a = dot(&ray.direction, &ray.direction);
let half_b = dot(&ray.direction, &(ray.origin.clone() - self.center.clone()));
let c = dot(&(ray.origin.clone() - self.center.clone()), &(ray.origin.clone() - self.center.clone())) - self.radius * self.radius;
let delta = half_b * half_b - a * c;
let res = (- half_b - delta.sqrt()) / a;
if delta < 0.0 || res < t_min || res > t_max {
return -1.0
}
let p = ray.at(res);
let normal = (p.clone() - self.center.clone()).unit_vec3();
// if dot( &normal, &ray.direction) > 0.0 {
// normal = normal * -1.0
// }
hit_record.t = Some(res);
hit_record.point = Some(p);
hit_record.normal = Some(normal);
delta
}
}
impl GameObjectTrait for Sphere {
fn is_in_object(&self, p: &Point3) -> bool {
let v = p.clone() - self.center.clone();
v.length() < self.radius
}
fn distance(&self, p: &Point3) -> f64 {
let v = p.clone() - self.center.clone();
v.length() - self.radius
}
fn is_game_object(&mut self) -> bool {
true
}
fn bind_material(&mut self, material:Rc<dyn Material>) {
self.material = Some(material)
}
} | use crate::hitable::{HitRecord, Hitable};
|
part_2.rs | use aoclib::Solvable;
use aoc201501::part_2::PartTwo;
#[test]
fn input() -> aoclib::Result<()> {
let input = aoclib::reader(2015, 1, "input.txt")?;
assert_eq!(PartTwo::solve(&input)?, 1795);
Ok(())
}
#[test]
fn | () -> aoclib::Result<()> {
assert_eq!(PartTwo::solve(")")?, 1);
Ok(())
}
#[test]
fn example_2() -> aoclib::Result<()> {
assert_eq!(PartTwo::solve("()())")?, 5);
Ok(())
}
| example_1 |
synchronization.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
// protoc v3.19.4
// source: service/synchronization/synchronization.proto
package synchronization
import (
selection "github.com/mutagen-io/mutagen/pkg/selection"
synchronization "github.com/mutagen-io/mutagen/pkg/synchronization"
url "github.com/mutagen-io/mutagen/pkg/url"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// CreationSpecification contains the metadata required for a new session.
type CreationSpecification struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Alpha is the alpha endpoint URL for the session.
Alpha *url.URL `protobuf:"bytes,1,opt,name=alpha,proto3" json:"alpha,omitempty"`
// Beta is the beta endpoint URL for the session.
Beta *url.URL `protobuf:"bytes,2,opt,name=beta,proto3" json:"beta,omitempty"`
// Configuration is the base session configuration. It is the result of
// merging the global configuration (unless disabled), any manually
// specified configuration file, and any command line configuration
// parameters.
Configuration *synchronization.Configuration `protobuf:"bytes,3,opt,name=configuration,proto3" json:"configuration,omitempty"`
// ConfigurationAlpha is the alpha-specific session configuration. It is
// determined based on command line configuration parameters.
ConfigurationAlpha *synchronization.Configuration `protobuf:"bytes,4,opt,name=configurationAlpha,proto3" json:"configurationAlpha,omitempty"`
// ConfigurationBeta is the beta-specific session configuration. It is
// determined based on command line configuration parameters.
ConfigurationBeta *synchronization.Configuration `protobuf:"bytes,5,opt,name=configurationBeta,proto3" json:"configurationBeta,omitempty"`
// Name is the name for the session object.
Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
// Labels are the labels for the session object.
Labels map[string]string `protobuf:"bytes,7,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Paused indicates whether or not to create the session pre-paused.
Paused bool `protobuf:"varint,8,opt,name=paused,proto3" json:"paused,omitempty"`
}
func (x *CreationSpecification) Reset() {
*x = CreationSpecification{}
if protoimpl.UnsafeEnabled {
mi := &file_service_synchronization_synchronization_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *CreationSpecification) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CreationSpecification) ProtoMessage() {}
func (x *CreationSpecification) ProtoReflect() protoreflect.Message {
mi := &file_service_synchronization_synchronization_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CreationSpecification.ProtoReflect.Descriptor instead.
func (*CreationSpecification) Descriptor() ([]byte, []int) {
return file_service_synchronization_synchronization_proto_rawDescGZIP(), []int{0}
}
func (x *CreationSpecification) GetAlpha() *url.URL {
if x != nil {
return x.Alpha
}
return nil
}
func (x *CreationSpecification) GetBeta() *url.URL {
if x != nil {
return x.Beta
}
return nil
}
func (x *CreationSpecification) GetConfiguration() *synchronization.Configuration {
if x != nil {
return x.Configuration
}
return nil
}
func (x *CreationSpecification) GetConfigurationAlpha() *synchronization.Configuration {
if x != nil {
return x.ConfigurationAlpha
}
return nil
}
func (x *CreationSpecification) GetConfigurationBeta() *synchronization.Configuration {
if x != nil {
return x.ConfigurationBeta
}
return nil
}
func (x *CreationSpecification) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *CreationSpecification) GetLabels() map[string]string {
if x != nil {
return x.Labels
}
return nil
}
func (x *CreationSpecification) GetPaused() bool {
if x != nil {
return x.Paused
}
return false
}
// CreateRequest encodes a request for session creation.
type CreateRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Prompter is the prompter identifier to use for creating sessions.
Prompter string `protobuf:"bytes,1,opt,name=prompter,proto3" json:"prompter,omitempty"`
// Specification is the creation specification.
Specification *CreationSpecification `protobuf:"bytes,2,opt,name=specification,proto3" json:"specification,omitempty"`
}
func (x *CreateRequest) Reset() {
*x = CreateRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_service_synchronization_synchronization_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *CreateRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CreateRequest) ProtoMessage() {}
func (x *CreateRequest) ProtoReflect() protoreflect.Message {
mi := &file_service_synchronization_synchronization_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CreateRequest.ProtoReflect.Descriptor instead.
func (*CreateRequest) Descriptor() ([]byte, []int) {
return file_service_synchronization_synchronization_proto_rawDescGZIP(), []int{1}
}
func (x *CreateRequest) GetPrompter() string {
if x != nil {
return x.Prompter
}
return ""
}
func (x *CreateRequest) GetSpecification() *CreationSpecification {
if x != nil {
return x.Specification
}
return nil
}
// CreateResponse encodes a session creation response.
type CreateResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Session is the resulting session identifier.
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
}
func (x *CreateResponse) Reset() {
*x = CreateResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_service_synchronization_synchronization_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *CreateResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CreateResponse) ProtoMessage() {}
func (x *CreateResponse) ProtoReflect() protoreflect.Message {
mi := &file_service_synchronization_synchronization_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CreateResponse.ProtoReflect.Descriptor instead.
func (*CreateResponse) Descriptor() ([]byte, []int) {
return file_service_synchronization_synchronization_proto_rawDescGZIP(), []int{2}
}
func (x *CreateResponse) GetSession() string {
if x != nil {
return x.Session
}
return ""
}
// ListRequest encodes a request for session metadata.
type ListRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Selection is the session selection criteria.
Selection *selection.Selection `protobuf:"bytes,1,opt,name=selection,proto3" json:"selection,omitempty"`
// PreviousStateIndex is the previously seen state index. 0 may be provided
// to force an immediate state listing.
PreviousStateIndex uint64 `protobuf:"varint,2,opt,name=previousStateIndex,proto3" json:"previousStateIndex,omitempty"`
}
func (x *ListRequest) Reset() {
*x = ListRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_service_synchronization_synchronization_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ListRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListRequest) ProtoMessage() {}
func (x *ListRequest) ProtoReflect() protoreflect.Message {
mi := &file_service_synchronization_synchronization_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListRequest.ProtoReflect.Descriptor instead.
func (*ListRequest) Descriptor() ([]byte, []int) {
return file_service_synchronization_synchronization_proto_rawDescGZIP(), []int{3}
}
func (x *ListRequest) GetSelection() *selection.Selection {
if x != nil {
return x.Selection
}
return nil
}
func (x *ListRequest) GetPreviousStateIndex() uint64 {
if x != nil {
return x.PreviousStateIndex
}
return 0
}
// ListResponse encodes session metadata.
type ListResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// StateIndex is the state index associated with the session metadata.
StateIndex uint64 `protobuf:"varint,1,opt,name=stateIndex,proto3" json:"stateIndex,omitempty"`
// SessionStates are the session metadata states.
SessionStates []*synchronization.State `protobuf:"bytes,2,rep,name=sessionStates,proto3" json:"sessionStates,omitempty"`
}
func (x *ListResponse) Reset() {
*x = ListResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_service_synchronization_synchronization_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ListResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListResponse) ProtoMessage() {}
func (x *ListResponse) ProtoReflect() protoreflect.Message {
mi := &file_service_synchronization_synchronization_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListResponse.ProtoReflect.Descriptor instead.
func (*ListResponse) Descriptor() ([]byte, []int) {
return file_service_synchronization_synchronization_proto_rawDescGZIP(), []int{4}
}
func (x *ListResponse) GetStateIndex() uint64 {
if x != nil {
return x.StateIndex
}
return 0
}
func (x *ListResponse) GetSessionStates() []*synchronization.State {
if x != nil {
return x.SessionStates
}
return nil
}
// FlushRequest encodes a request to flush sessions.
type FlushRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Prompter is the prompter to use for status message updates.
Prompter string `protobuf:"bytes,1,opt,name=prompter,proto3" json:"prompter,omitempty"`
// Selection is the session selection criteria.
Selection *selection.Selection `protobuf:"bytes,2,opt,name=selection,proto3" json:"selection,omitempty"`
// SkipWait indicates whether or not the operation should avoid blocking.
SkipWait bool `protobuf:"varint,3,opt,name=skipWait,proto3" json:"skipWait,omitempty"`
}
func (x *FlushRequest) Reset() {
*x = FlushRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_service_synchronization_synchronization_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *FlushRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FlushRequest) ProtoMessage() {}
func (x *FlushRequest) ProtoReflect() protoreflect.Message {
mi := &file_service_synchronization_synchronization_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FlushRequest.ProtoReflect.Descriptor instead.
func (*FlushRequest) Descriptor() ([]byte, []int) {
return file_service_synchronization_synchronization_proto_rawDescGZIP(), []int{5}
}
func (x *FlushRequest) GetPrompter() string {
if x != nil {
return x.Prompter
}
return ""
}
func (x *FlushRequest) GetSelection() *selection.Selection {
if x != nil {
return x.Selection
}
return nil
}
func (x *FlushRequest) GetSkipWait() bool {
if x != nil {
return x.SkipWait
}
return false
}
// FlushResponse indicates completion of flush operation(s).
type FlushResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *FlushResponse) Reset() {
*x = FlushResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_service_synchronization_synchronization_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *FlushResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FlushResponse) ProtoMessage() {}
func (x *FlushResponse) ProtoReflect() protoreflect.Message {
mi := &file_service_synchronization_synchronization_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FlushResponse.ProtoReflect.Descriptor instead.
func (*FlushResponse) Descriptor() ([]byte, []int) {
return file_service_synchronization_synchronization_proto_rawDescGZIP(), []int{6}
}
// PauseRequest encodes a request to pause sessions.
type PauseRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Prompter is the prompter to use for status message updates.
Prompter string `protobuf:"bytes,1,opt,name=prompter,proto3" json:"prompter,omitempty"`
// Selection is the session selection criteria.
Selection *selection.Selection `protobuf:"bytes,2,opt,name=selection,proto3" json:"selection,omitempty"`
}
func (x *PauseRequest) Reset() {
*x = PauseRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_service_synchronization_synchronization_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *PauseRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PauseRequest) ProtoMessage() {}
func (x *PauseRequest) ProtoReflect() protoreflect.Message {
mi := &file_service_synchronization_synchronization_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PauseRequest.ProtoReflect.Descriptor instead.
func (*PauseRequest) Descriptor() ([]byte, []int) {
return file_service_synchronization_synchronization_proto_rawDescGZIP(), []int{7}
}
func (x *PauseRequest) GetPrompter() string {
if x != nil {
return x.Prompter
}
return ""
}
func (x *PauseRequest) GetSelection() *selection.Selection {
if x != nil {
return x.Selection
}
return nil
}
// PauseResponse indicates completion of pause operation(s).
type PauseResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *PauseResponse) Reset() {
*x = PauseResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_service_synchronization_synchronization_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *PauseResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PauseResponse) ProtoMessage() {}
func (x *PauseResponse) ProtoReflect() protoreflect.Message {
mi := &file_service_synchronization_synchronization_proto_msgTypes[8]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PauseResponse.ProtoReflect.Descriptor instead.
func (*PauseResponse) Descriptor() ([]byte, []int) {
return file_service_synchronization_synchronization_proto_rawDescGZIP(), []int{8}
}
// ResumeRequest encodes a request to resume sessions.
type ResumeRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Prompter is the prompter identifier to use for resuming sessions.
Prompter string `protobuf:"bytes,1,opt,name=prompter,proto3" json:"prompter,omitempty"`
// Selection is the session selection criteria.
Selection *selection.Selection `protobuf:"bytes,2,opt,name=selection,proto3" json:"selection,omitempty"`
}
func (x *ResumeRequest) Reset() {
*x = ResumeRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_service_synchronization_synchronization_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ResumeRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ResumeRequest) ProtoMessage() {}
func (x *ResumeRequest) ProtoReflect() protoreflect.Message {
mi := &file_service_synchronization_synchronization_proto_msgTypes[9]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ResumeRequest.ProtoReflect.Descriptor instead.
func (*ResumeRequest) Descriptor() ([]byte, []int) {
return file_service_synchronization_synchronization_proto_rawDescGZIP(), []int{9}
}
func (x *ResumeRequest) GetPrompter() string {
if x != nil {
return x.Prompter
}
return ""
}
func (x *ResumeRequest) GetSelection() *selection.Selection {
if x != nil {
return x.Selection
}
return nil
}
// ResumeResponse indicates completion of resume operation(s).
type ResumeResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *ResumeResponse) Reset() {
*x = ResumeResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_service_synchronization_synchronization_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ResumeResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ResumeResponse) ProtoMessage() {}
func (x *ResumeResponse) ProtoReflect() protoreflect.Message {
mi := &file_service_synchronization_synchronization_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ResumeResponse.ProtoReflect.Descriptor instead.
func (*ResumeResponse) Descriptor() ([]byte, []int) {
return file_service_synchronization_synchronization_proto_rawDescGZIP(), []int{10}
}
// ResetRequest encodes a request to reset sessions.
type ResetRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Prompter is the prompter identifier to use for resetting sessions.
Prompter string `protobuf:"bytes,1,opt,name=prompter,proto3" json:"prompter,omitempty"`
// Selection is the session selection criteria.
Selection *selection.Selection `protobuf:"bytes,2,opt,name=selection,proto3" json:"selection,omitempty"`
}
func (x *ResetRequest) Reset() {
*x = ResetRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_service_synchronization_synchronization_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ResetRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ResetRequest) ProtoMessage() {}
func (x *ResetRequest) ProtoReflect() protoreflect.Message {
mi := &file_service_synchronization_synchronization_proto_msgTypes[11]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ResetRequest.ProtoReflect.Descriptor instead.
func (*ResetRequest) Descriptor() ([]byte, []int) {
return file_service_synchronization_synchronization_proto_rawDescGZIP(), []int{11}
}
func (x *ResetRequest) GetPrompter() string {
if x != nil {
return x.Prompter
}
return ""
}
func (x *ResetRequest) GetSelection() *selection.Selection {
if x != nil {
return x.Selection
}
return nil
}
// ResetResponse indicates completion of reset operation(s).
type ResetResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *ResetResponse) Reset() {
*x = ResetResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_service_synchronization_synchronization_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ResetResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ResetResponse) ProtoMessage() {}
func (x *ResetResponse) ProtoReflect() protoreflect.Message {
mi := &file_service_synchronization_synchronization_proto_msgTypes[12]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ResetResponse.ProtoReflect.Descriptor instead.
func (*ResetResponse) Descriptor() ([]byte, []int) {
return file_service_synchronization_synchronization_proto_rawDescGZIP(), []int{12}
}
// TerminateRequest encodes a request to terminate sessions.
type TerminateRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Prompter is the prompter to use for status message updates.
Prompter string `protobuf:"bytes,1,opt,name=prompter,proto3" json:"prompter,omitempty"`
// Selection is the session selection criteria.
Selection *selection.Selection `protobuf:"bytes,2,opt,name=selection,proto3" json:"selection,omitempty"`
}
func (x *TerminateRequest) Reset() {
*x = TerminateRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_service_synchronization_synchronization_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TerminateRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TerminateRequest) ProtoMessage() {}
func (x *TerminateRequest) ProtoReflect() protoreflect.Message {
mi := &file_service_synchronization_synchronization_proto_msgTypes[13]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TerminateRequest.ProtoReflect.Descriptor instead.
func (*TerminateRequest) Descriptor() ([]byte, []int) {
return file_service_synchronization_synchronization_proto_rawDescGZIP(), []int{13}
}
func (x *TerminateRequest) GetPrompter() string {
if x != nil {
return x.Prompter
}
return ""
}
func (x *TerminateRequest) GetSelection() *selection.Selection {
if x != nil {
return x.Selection
}
return nil
}
// TerminateResponse indicates completion of termination operation(s).
type TerminateResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *TerminateResponse) Reset() {
*x = TerminateResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_service_synchronization_synchronization_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TerminateResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TerminateResponse) ProtoMessage() {}
func (x *TerminateResponse) ProtoReflect() protoreflect.Message {
mi := &file_service_synchronization_synchronization_proto_msgTypes[14]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TerminateResponse.ProtoReflect.Descriptor instead.
func (*TerminateResponse) Descriptor() ([]byte, []int) {
return file_service_synchronization_synchronization_proto_rawDescGZIP(), []int{14}
}
var File_service_synchronization_synchronization_proto protoreflect.FileDescriptor
var file_service_synchronization_synchronization_proto_rawDesc = []byte{
0x0a, 0x2d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72,
0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72,
0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x0f, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x1a, 0x19, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x73, 0x65, 0x6c, 0x65,
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x73, 0x79, 0x6e,
0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x1a, 0x1b, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0d, 0x75,
0x72, 0x6c, 0x2f, 0x75, 0x72, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xec, 0x03, 0x0a,
0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x05, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x18,
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x75, 0x72, 0x6c, 0x2e, 0x55, 0x52, 0x4c, 0x52,
0x05, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x12, 0x1c, 0x0a, 0x04, 0x62, 0x65, 0x74, 0x61, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x75, 0x72, 0x6c, 0x2e, 0x55, 0x52, 0x4c, 0x52, 0x04,
0x62, 0x65, 0x74, 0x61, 0x12, 0x44, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x79,
0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x63, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x12, 0x63, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x70, 0x68, 0x61,
0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f,
0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x12, 0x4c, 0x0a, 0x11, 0x63, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x65, 0x74, 0x61, 0x18,
0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e,
0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x42, 0x65, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4a, 0x0a, 0x06,
0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x73,
0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43,
0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x75, 0x73,
0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x75, 0x73, 0x65, 0x64,
0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x79, 0x0a, 0x0d, 0x43,
0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08,
0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x65, 0x72, 0x12, 0x4c, 0x0a, 0x0d, 0x73, 0x70, 0x65, 0x63,
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x26, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66,
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2a, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73,
0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69,
0x6f, 0x6e, 0x22, 0x71, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x73, 0x65, 0x6c, 0x65,
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75,
0x73, 0x53, 0x74, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28,
0x04, 0x52, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x53, 0x74, 0x61, 0x74, 0x65,
0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x6c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x49, 0x6e,
0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65,
0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x3c, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73,
0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53,
0x74, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61,
0x74, 0x65, 0x73, 0x22, 0x7a, 0x0a, 0x0c, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x65, 0x72, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x65, 0x72, 0x12,
0x32, 0x0a, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53,
0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74,
0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x6b, 0x69, 0x70, 0x57, 0x61, 0x69, 0x74, 0x18,
0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x6b, 0x69, 0x70, 0x57, 0x61, 0x69, 0x74, 0x22,
0x0f, 0x0a, 0x0d, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x5e, 0x0a, 0x0c, 0x50, 0x61, 0x75, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x09,
0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x14, 0x2e, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6c, 0x65,
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
0x22, 0x0f, 0x0a, 0x0d, 0x50, 0x61, 0x75, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x22, 0x5f, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x65, 0x72, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x65, 0x72, 0x12, 0x32,
0x0a, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x14, 0x2e, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65,
0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69,
0x6f, 0x6e, 0x22, 0x10, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5e, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x65, 0x72,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x65, 0x72,
0x12, 0x32, 0x0a, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63,
0x74, 0x69, 0x6f, 0x6e, 0x22, 0x0f, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x62, 0x0a, 0x10, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61,
0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f,
0x6d, 0x70, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f,
0x6d, 0x70, 0x74, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69,
0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x65, 0x6c, 0x65, 0x63,
0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09,
0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x13, 0x0a, 0x11, 0x54, 0x65, 0x72,
0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xa6,
0x04, 0x0a, 0x0f, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x12, 0x4b, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1e, 0x2e, 0x73,
0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43,
0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x73,
0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43,
0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
0x45, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1c, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72,
0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e,
0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x05, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x12,
0x1d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e,
0x2e, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
0x12, 0x48, 0x0a, 0x05, 0x50, 0x61, 0x75, 0x73, 0x65, 0x12, 0x1d, 0x2e, 0x73, 0x79, 0x6e, 0x63,
0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x75, 0x73,
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x68,
0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x75, 0x73, 0x65,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x06, 0x52, 0x65,
0x73, 0x75, 0x6d, 0x65, 0x12, 0x1e, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69,
0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69,
0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x05, 0x52, 0x65, 0x73, 0x65, 0x74,
0x12, 0x1d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x1e, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x00, 0x12, 0x54, 0x0a, 0x09, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x12, 0x21,
0x2e, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x2e, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x22, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75,
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x75, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x2d, 0x69, 0x6f,
0x2f, 0x6d, 0x75, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x2f, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_service_synchronization_synchronization_proto_rawDescOnce sync.Once
file_service_synchronization_synchronization_proto_rawDescData = file_service_synchronization_synchronization_proto_rawDesc
)
func | () []byte {
file_service_synchronization_synchronization_proto_rawDescOnce.Do(func() {
file_service_synchronization_synchronization_proto_rawDescData = protoimpl.X.CompressGZIP(file_service_synchronization_synchronization_proto_rawDescData)
})
return file_service_synchronization_synchronization_proto_rawDescData
}
var file_service_synchronization_synchronization_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
var file_service_synchronization_synchronization_proto_goTypes = []interface{}{
(*CreationSpecification)(nil), // 0: synchronization.CreationSpecification
(*CreateRequest)(nil), // 1: synchronization.CreateRequest
(*CreateResponse)(nil), // 2: synchronization.CreateResponse
(*ListRequest)(nil), // 3: synchronization.ListRequest
(*ListResponse)(nil), // 4: synchronization.ListResponse
(*FlushRequest)(nil), // 5: synchronization.FlushRequest
(*FlushResponse)(nil), // 6: synchronization.FlushResponse
(*PauseRequest)(nil), // 7: synchronization.PauseRequest
(*PauseResponse)(nil), // 8: synchronization.PauseResponse
(*ResumeRequest)(nil), // 9: synchronization.ResumeRequest
(*ResumeResponse)(nil), // 10: synchronization.ResumeResponse
(*ResetRequest)(nil), // 11: synchronization.ResetRequest
(*ResetResponse)(nil), // 12: synchronization.ResetResponse
(*TerminateRequest)(nil), // 13: synchronization.TerminateRequest
(*TerminateResponse)(nil), // 14: synchronization.TerminateResponse
nil, // 15: synchronization.CreationSpecification.LabelsEntry
(*url.URL)(nil), // 16: url.URL
(*synchronization.Configuration)(nil), // 17: synchronization.Configuration
(*selection.Selection)(nil), // 18: selection.Selection
(*synchronization.State)(nil), // 19: synchronization.State
}
var file_service_synchronization_synchronization_proto_depIdxs = []int32{
16, // 0: synchronization.CreationSpecification.alpha:type_name -> url.URL
16, // 1: synchronization.CreationSpecification.beta:type_name -> url.URL
17, // 2: synchronization.CreationSpecification.configuration:type_name -> synchronization.Configuration
17, // 3: synchronization.CreationSpecification.configurationAlpha:type_name -> synchronization.Configuration
17, // 4: synchronization.CreationSpecification.configurationBeta:type_name -> synchronization.Configuration
15, // 5: synchronization.CreationSpecification.labels:type_name -> synchronization.CreationSpecification.LabelsEntry
0, // 6: synchronization.CreateRequest.specification:type_name -> synchronization.CreationSpecification
18, // 7: synchronization.ListRequest.selection:type_name -> selection.Selection
19, // 8: synchronization.ListResponse.sessionStates:type_name -> synchronization.State
18, // 9: synchronization.FlushRequest.selection:type_name -> selection.Selection
18, // 10: synchronization.PauseRequest.selection:type_name -> selection.Selection
18, // 11: synchronization.ResumeRequest.selection:type_name -> selection.Selection
18, // 12: synchronization.ResetRequest.selection:type_name -> selection.Selection
18, // 13: synchronization.TerminateRequest.selection:type_name -> selection.Selection
1, // 14: synchronization.Synchronization.Create:input_type -> synchronization.CreateRequest
3, // 15: synchronization.Synchronization.List:input_type -> synchronization.ListRequest
5, // 16: synchronization.Synchronization.Flush:input_type -> synchronization.FlushRequest
7, // 17: synchronization.Synchronization.Pause:input_type -> synchronization.PauseRequest
9, // 18: synchronization.Synchronization.Resume:input_type -> synchronization.ResumeRequest
11, // 19: synchronization.Synchronization.Reset:input_type -> synchronization.ResetRequest
13, // 20: synchronization.Synchronization.Terminate:input_type -> synchronization.TerminateRequest
2, // 21: synchronization.Synchronization.Create:output_type -> synchronization.CreateResponse
4, // 22: synchronization.Synchronization.List:output_type -> synchronization.ListResponse
6, // 23: synchronization.Synchronization.Flush:output_type -> synchronization.FlushResponse
8, // 24: synchronization.Synchronization.Pause:output_type -> synchronization.PauseResponse
10, // 25: synchronization.Synchronization.Resume:output_type -> synchronization.ResumeResponse
12, // 26: synchronization.Synchronization.Reset:output_type -> synchronization.ResetResponse
14, // 27: synchronization.Synchronization.Terminate:output_type -> synchronization.TerminateResponse
21, // [21:28] is the sub-list for method output_type
14, // [14:21] is the sub-list for method input_type
14, // [14:14] is the sub-list for extension type_name
14, // [14:14] is the sub-list for extension extendee
0, // [0:14] is the sub-list for field type_name
}
func init() { file_service_synchronization_synchronization_proto_init() }
func file_service_synchronization_synchronization_proto_init() {
if File_service_synchronization_synchronization_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_service_synchronization_synchronization_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*CreationSpecification); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_service_synchronization_synchronization_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*CreateRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_service_synchronization_synchronization_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*CreateResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_service_synchronization_synchronization_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_service_synchronization_synchronization_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_service_synchronization_synchronization_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FlushRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_service_synchronization_synchronization_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FlushResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_service_synchronization_synchronization_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PauseRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_service_synchronization_synchronization_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PauseResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_service_synchronization_synchronization_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ResumeRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_service_synchronization_synchronization_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ResumeResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_service_synchronization_synchronization_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ResetRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_service_synchronization_synchronization_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ResetResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_service_synchronization_synchronization_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TerminateRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_service_synchronization_synchronization_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TerminateResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_service_synchronization_synchronization_proto_rawDesc,
NumEnums: 0,
NumMessages: 16,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_service_synchronization_synchronization_proto_goTypes,
DependencyIndexes: file_service_synchronization_synchronization_proto_depIdxs,
MessageInfos: file_service_synchronization_synchronization_proto_msgTypes,
}.Build()
File_service_synchronization_synchronization_proto = out.File
file_service_synchronization_synchronization_proto_rawDesc = nil
file_service_synchronization_synchronization_proto_goTypes = nil
file_service_synchronization_synchronization_proto_depIdxs = nil
}
| file_service_synchronization_synchronization_proto_rawDescGZIP |
icon.videoPlayer-js.min.js | (window.webpackJsonp=window.webpackJsonp||[]).push([[357],{3193:function(e,t,n){"use strict";n.r(t),n.d(t,"icon",(function(){return c}));n(11),n(2),n(7),n(8),n(5),n(9);var r=n(0),a=n.n(r);function i(){return(i=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}).apply(this,arguments)}function | (e,t){if(null==e)return{};var n,r,a=function(e,t){if(null==e)return{};var n,r,a={},i=Object.keys(e);for(r=0;r<i.length;r++)n=i[r],t.indexOf(n)>=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r<i.length;r++)n=i[r],t.indexOf(n)>=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var o=function(e){var t=e.title,n=e.titleId,r=l(e,["title","titleId"]);return a.a.createElement("svg",i({width:16,height:16,viewBox:"0 0 16 16",xmlns:"http://www.w3.org/2000/svg","aria-labelledby":n},r),t?a.a.createElement("title",{id:n},t):null,a.a.createElement("path",{d:"M0 1.994C0 .893.895 0 1.994 0h12.012C15.107 0 16 .895 16 1.994v12.012A1.995 1.995 0 0114.006 16H1.994A1.995 1.995 0 010 14.006V1.994zm1 0v12.012c0 .548.446.994.994.994h12.012a.995.995 0 00.994-.994V1.994A.995.995 0 0014.006 1H1.994A.995.995 0 001 1.994zM1 4h14v1H1V4zm1.5-1a.5.5 0 010-1h1a.5.5 0 010 1h-1zm3 0a.5.5 0 010-1h1a.5.5 0 010 1h-1zm4.947 6.106a1 1 0 010 1.788l-3 2A1 1 0 016 12V8a1 1 0 011.447-.894l3 2zM10 10L7 8v4l3-2z"}))},c=o;o.__docgenInfo={description:"",methods:[],displayName:"EuiIconVideoPlayer"}}}]);
//# sourceMappingURL=icon.videoPlayer-js.min.js.map | l |
config.rs | use crate::configs::StarshipRootConfig;
use crate::utils;
use ansi_term::{Color, Style};
use indexmap::IndexMap;
use serde::Serialize;
use std::clone::Clone;
use std::collections::HashMap;
use std::io::ErrorKind;
use std::marker::Sized;
use std::env;
use toml::Value;
/// Root config of a module.
pub trait RootModuleConfig<'a>
where
Self: ModuleConfig<'a> + Default,
{
/// Load root module config from given Value and fill unset variables with default
/// values.
fn load(config: &'a Value) -> Self {
let mut out = Self::default();
out.load_config(config);
out
}
/// Helper function that will call RootModuleConfig::load(config) if config is Some,
/// or RootModuleConfig::new() if config is None.
fn try_load(config: Option<&'a Value>) -> Self {
if let Some(config) = config {
Self::load(config)
} else {
Self::default()
}
}
}
impl<'a, T: ModuleConfig<'a> + Default> RootModuleConfig<'a> for T {}
/// Parsable config.
pub trait ModuleConfig<'a>
where
Self: Sized + Clone,
{
/// Construct a `ModuleConfig` from a toml value.
fn from_config(_config: &'a Value) -> Option<Self> {
None
}
/// Merge `self` with config from a toml table.
fn load_config(&mut self, config: &'a Value) {
if let Some(value) = Self::from_config(config) {
let _ = std::mem::replace(self, value);
}
}
}
// TODO: Add logging to default implementations
impl<'a> ModuleConfig<'a> for &'a str {
fn from_config(config: &'a Value) -> Option<Self> {
config.as_str()
}
}
impl<'a> ModuleConfig<'a> for Style {
fn from_config(config: &Value) -> Option<Self> {
parse_style_string(config.as_str()?)
}
}
impl<'a> ModuleConfig<'a> for bool {
fn from_config(config: &Value) -> Option<Self> {
config.as_bool()
}
}
impl<'a> ModuleConfig<'a> for i64 {
fn from_config(config: &Value) -> Option<Self> {
config.as_integer()
}
}
impl<'a> ModuleConfig<'a> for u64 {
fn from_config(config: &Value) -> Option<Self> {
match config {
Value::Integer(value) => {
// Converting i64 to u64
if *value > 0 {
Some(*value as u64)
} else {
None
}
}
Value::String(value) => value.parse::<u64>().ok(),
_ => None,
}
}
}
impl<'a> ModuleConfig<'a> for f64 {
fn from_config(config: &Value) -> Option<Self> {
config.as_float()
}
}
impl<'a> ModuleConfig<'a> for usize {
fn from_config(config: &Value) -> Option<Self> {
match config {
Value::Integer(value) => {
if *value > 0 {
Some(*value as usize)
} else {
None
}
}
Value::String(value) => value.parse::<usize>().ok(),
_ => None,
}
}
}
impl<'a, T> ModuleConfig<'a> for Vec<T>
where
T: ModuleConfig<'a>,
{
fn from_config(config: &'a Value) -> Option<Self> {
config
.as_array()?
.iter()
.map(|value| T::from_config(value))
.collect()
}
}
impl<'a, T, S: ::std::hash::BuildHasher + Default> ModuleConfig<'a> for HashMap<String, T, S>
where
T: ModuleConfig<'a>,
S: Clone,
{
fn from_config(config: &'a Value) -> Option<Self> {
let mut hm = HashMap::default();
for (x, y) in config.as_table()?.iter() {
hm.insert(x.clone(), T::from_config(y)?);
}
Some(hm)
}
}
impl<'a, T, S: ::std::hash::BuildHasher + Default> ModuleConfig<'a> for IndexMap<String, T, S>
where
T: ModuleConfig<'a>,
S: Clone,
{
fn from_config(config: &'a Value) -> Option<Self> {
let mut im = IndexMap::default();
for (x, y) in config.as_table()?.iter() {
im.insert(x.clone(), T::from_config(y)?);
}
Some(im)
}
}
impl<'a, T> ModuleConfig<'a> for Option<T>
where
T: ModuleConfig<'a> + Sized,
{
fn from_config(config: &'a Value) -> Option<Self> {
Some(T::from_config(config))
}
}
/// A wrapper around `Vec<T>` that implements `ModuleConfig`, and either
/// accepts a value of type `T` or a list of values of type `T`.
#[derive(Clone, Default, Serialize)]
pub struct VecOr<T>(pub Vec<T>);
impl<'a, T> ModuleConfig<'a> for VecOr<T>
where
T: ModuleConfig<'a> + Sized,
{
fn from_config(config: &'a Value) -> Option<Self> {
if let Some(item) = T::from_config(config) {
return Some(VecOr(vec![item]));
}
let vec = config
.as_array()?
.iter()
.map(|value| T::from_config(value))
.collect::<Option<Vec<T>>>()?;
Some(VecOr(vec))
}
}
/// Root config of starship.
pub struct StarshipConfig {
pub config: Option<Value>,
}
impl StarshipConfig {
/// Initialize the Config struct
pub fn initialize() -> Self {
if let Some(file_data) = Self::config_from_file() {
StarshipConfig {
config: Some(file_data),
}
} else {
StarshipConfig {
config: Some(Value::Table(toml::value::Table::new())),
}
}
}
/// Create a config from a starship configuration file
fn config_from_file() -> Option<Value> {
let file_path = if let Ok(path) = env::var("STARSHIP_CONFIG") {
// Use $STARSHIP_CONFIG as the config path if available
log::debug!("STARSHIP_CONFIG is set: {}", &path);
path
} else {
// Default to using ~/.config/starship.toml
log::debug!("STARSHIP_CONFIG is not set");
let config_path = utils::home_dir()?.join(".config/starship.toml");
let config_path_str = config_path.to_str()?.to_owned();
log::debug!("Using default config path: {}", config_path_str);
config_path_str
};
let toml_content = match utils::read_file(&file_path) {
Ok(content) => {
log::trace!("Config file content: \"\n{}\"", &content);
Some(content)
}
Err(e) => {
let level = if e.kind() == ErrorKind::NotFound {
log::Level::Debug
} else {
log::Level::Error
};
log::log!(level, "Unable to read config file content: {}", &e);
None
}
}?;
match toml::from_str(&toml_content) {
Ok(parsed) => {
log::debug!("Config parsed: {:?}", &parsed);
Some(parsed)
}
Err(error) => {
log::error!("Unable to parse the config file: {}", error);
None
}
}
}
/// Get the subset of the table for a module by its name
pub fn get_module_config(&self, module_name: &str) -> Option<&Value> {
let module_config = self.get_config(&[module_name]);
if module_config.is_some() {
log::debug!(
"Config found for \"{}\": {:?}",
&module_name,
&module_config
);
}
module_config
}
/// Get the value of the config in a specific path
pub fn get_config(&self, path: &[&str]) -> Option<&Value> {
let mut prev_table = self.config.as_ref()?.as_table()?;
assert_ne!(
path.len(),
0,
"Starship::get_config called with an empty path"
);
let (table_options, _) = path.split_at(path.len() - 1);
// Assumes all keys except the last in path has a table
for option in table_options {
match prev_table.get(*option) {
Some(value) => match value.as_table() {
Some(value) => {
prev_table = value;
}
None => {
log::trace!(
"No config found for \"{}\": \"{}\" is not a table",
path.join("."),
&option
);
return None;
}
},
None => {
log::trace!(
"No config found for \"{}\": Option \"{}\" not found",
path.join("."),
&option
);
return None;
}
}
}
let last_option = path.last().unwrap();
let value = prev_table.get(*last_option);
if value.is_none() {
log::trace!(
"No config found for \"{}\": Option \"{}\" not found",
path.join("."),
&last_option
);
};
value
}
/// Get the subset of the table for a custom module by its name
pub fn get_custom_module_config(&self, module_name: &str) -> Option<&Value> {
let module_config = self.get_config(&["custom", module_name]);
if module_config.is_some() {
log::debug!(
"Custom config found for \"{}\": {:?}",
&module_name,
&module_config
);
}
module_config
}
/// Get the table of all the registered custom modules, if any
pub fn get_custom_modules(&self) -> Option<&toml::value::Table> {
self.get_config(&["custom"])?.as_table()
}
/// Get the table of all the registered env_var modules, if any
pub fn get_env_var_modules(&self) -> Option<&toml::value::Table> {
self.get_config(&["env_var"])?.as_table()
}
pub fn get_root_config(&self) -> StarshipRootConfig {
if let Some(root_config) = &self.config {
StarshipRootConfig::load(root_config)
} else {
StarshipRootConfig::default()
}
}
}
/** Parse a style string which represents an ansi style. Valid tokens in the style
string include the following:
- 'fg:<color>' (specifies that the color read should be a foreground color)
- 'bg:<color>' (specifies that the color read should be a background color)
- 'underline'
- 'bold'
- 'italic'
- 'inverted'
- '<color>' (see the parse_color_string doc for valid color strings)
*/
pub fn parse_style_string(style_string: &str) -> Option<ansi_term::Style> {
style_string
.split_whitespace()
.fold(Some(ansi_term::Style::new()), |maybe_style, token| {
maybe_style.and_then(|style| {
let token = token.to_lowercase();
// Check for FG/BG identifiers and strip them off if appropriate
// If col_fg is true, color the foreground. If it's false, color the background.
let (token, col_fg) = if token.as_str().starts_with("fg:") {
(token.trim_start_matches("fg:").to_owned(), true)
} else if token.as_str().starts_with("bg:") {
(token.trim_start_matches("bg:").to_owned(), false)
} else {
(token, true) // Bare colors are assumed to color the foreground
};
match token.as_str() {
"underline" => Some(style.underline()),
"bold" => Some(style.bold()),
"italic" => Some(style.italic()),
"dimmed" => Some(style.dimmed()),
"inverted" => Some(style.reverse()),
// When the string is supposed to be a color:
// Decide if we yield none, reset background or set color.
color_string => {
if color_string == "none" && col_fg {
None // fg:none yields no style.
} else {
// Either bg or valid color or both.
let parsed = parse_color_string(color_string);
// bg + invalid color = reset the background to default.
if !col_fg && parsed.is_none() {
let mut new_style = style;
new_style.background = Option::None;
Some(new_style)
} else {
// Valid color, apply color to either bg or fg
parsed.map(|ansi_color| {
if col_fg {
style.fg(ansi_color)
} else {
style.on(ansi_color)
}
})
}
}
}
}
})
})
}
/** Parse a string that represents a color setting, returning None if this fails
There are three valid color formats:
- #RRGGBB (a hash followed by an RGB hex)
- u8 (a number from 0-255, representing an ANSI color)
- colstring (one of the 16 predefined color strings)
*/
fn parse_color_string(color_string: &str) -> Option<ansi_term::Color> {
// Parse RGB hex values
log::trace!("Parsing color_string: {}", color_string);
if color_string.starts_with('#') {
log::trace!(
"Attempting to read hexadecimal color string: {}",
color_string
);
if color_string.len() != 7 {
log::debug!("Could not parse hexadecimal string: {}", color_string);
return None;
}
let r: u8 = u8::from_str_radix(&color_string[1..3], 16).ok()?;
let g: u8 = u8::from_str_radix(&color_string[3..5], 16).ok()?;
let b: u8 = u8::from_str_radix(&color_string[5..7], 16).ok()?;
log::trace!("Read RGB color string: {},{},{}", r, g, b);
return Some(Color::RGB(r, g, b));
}
// Parse a u8 (ansi color)
if let Result::Ok(ansi_color_num) = color_string.parse::<u8>() {
log::trace!("Read ANSI color string: {}", ansi_color_num);
return Some(Color::Fixed(ansi_color_num));
}
// Check for any predefined color strings
// There are no predefined enums for bright colors, so we use Color::Fixed
let predefined_color = match color_string.to_lowercase().as_str() {
"black" => Some(Color::Black),
"red" => Some(Color::Red),
"green" => Some(Color::Green),
"yellow" => Some(Color::Yellow),
"blue" => Some(Color::Blue),
"purple" => Some(Color::Purple),
"cyan" => Some(Color::Cyan),
"white" => Some(Color::White),
"bright-black" => Some(Color::Fixed(8)), // "bright-black" is dark grey
"bright-red" => Some(Color::Fixed(9)),
"bright-green" => Some(Color::Fixed(10)),
"bright-yellow" => Some(Color::Fixed(11)),
"bright-blue" => Some(Color::Fixed(12)),
"bright-purple" => Some(Color::Fixed(13)),
"bright-cyan" => Some(Color::Fixed(14)),
"bright-white" => Some(Color::Fixed(15)),
_ => None,
};
if predefined_color.is_some() {
log::trace!("Read predefined color: {}", color_string);
} else {
log::debug!("Could not parse color in string: {}", color_string);
}
predefined_color
}
#[cfg(test)]
mod tests {
use super::*;
use starship_module_config_derive::ModuleConfig;
#[test]
fn test_load_config() {
#[derive(Clone, Default, ModuleConfig)]
struct TestConfig<'a> {
pub symbol: &'a str,
pub disabled: bool,
pub some_array: Vec<&'a str>,
}
let config = toml::toml! {
symbol = "T "
disabled = true
some_array = ["A"]
};
let mut rust_config = TestConfig {
symbol: "S ",
disabled: false,
some_array: vec!["A", "B", "C"],
};
rust_config.load_config(&config);
assert_eq!(rust_config.symbol, "T ");
assert!(rust_config.disabled);
assert_eq!(rust_config.some_array, vec!["A"]);
}
#[test]
fn test_load_nested_config() {
#[derive(Clone, Default, ModuleConfig)]
struct TestConfig<'a> {
pub untracked: SegmentDisplayConfig<'a>,
pub modified: SegmentDisplayConfig<'a>,
}
#[derive(PartialEq, Debug, Clone, Default, ModuleConfig)]
struct SegmentDisplayConfig<'a> {
pub value: &'a str,
pub style: Style,
}
let config = toml::toml! {
untracked.value = "x"
modified = { value = "•", style = "red" }
};
let mut git_status_config = TestConfig {
untracked: SegmentDisplayConfig {
value: "?",
style: Color::Red.bold(),
},
modified: SegmentDisplayConfig {
value: "!",
style: Color::Red.bold(),
},
};
git_status_config.load_config(&config);
assert_eq!(
git_status_config.untracked,
SegmentDisplayConfig {
value: "x",
style: Color::Red.bold(),
}
);
assert_eq!(
git_status_config.modified,
SegmentDisplayConfig {
value: "•",
style: Color::Red.normal(),
}
);
}
#[test]
fn test_load_optional_config() {
#[derive(Clone, Default, ModuleConfig)]
struct TestConfig<'a> {
pub optional: Option<&'a str>,
pub hidden: Option<&'a str>,
}
let config = toml::toml! {
optional = "test"
};
let mut rust_config = TestConfig {
optional: None,
hidden: None,
};
rust_config.load_config(&config);
assert_eq!(rust_config.optional, Some("test"));
assert_eq!(rust_config.hidden, None);
}
#[test]
fn test_load_enum_config() {
#[derive(Clone, Default, ModuleConfig)]
struct TestConfig {
pub switch_a: Switch,
pub switch_b: Switch,
pub switch_c: Switch,
}
#[derive(Debug, PartialEq, Clone)]
enum Switch {
On,
Off,
}
impl Default for Switch {
fn default() -> Self {
Self::Off
}
}
impl<'a> ModuleConfig<'a> for Switch {
fn from_config(config: &'a Value) -> Option<Self> {
match config.as_str()? {
"on" => Some(Self::On),
"off" => Some(Self::Off),
_ => None,
}
}
}
let config = toml::toml! {
switch_a = "on"
switch_b = "any"
};
let mut rust_config = TestConfig {
switch_a: Switch::Off,
switch_b: Switch::Off,
switch_c: Switch::Off,
};
rust_config.load_config(&config);
assert_eq!(rust_config.switch_a, Switch::On);
assert_eq!(rust_config.switch_b, Switch::Off);
assert_eq!(rust_config.switch_c, Switch::Off);
}
#[test]
fn test_from_string() {
let config = Value::String(String::from("S"));
assert_eq!(<&str>::from_config(&config).unwrap(), "S");
}
#[test]
fn test |
let config = Value::Boolean(true);
assert!(<bool>::from_config(&config).unwrap());
}
#[test]
fn test_from_i64() {
let config = Value::Integer(42);
assert_eq!(<i64>::from_config(&config).unwrap(), 42);
}
#[test]
fn test_from_style() {
let config = Value::from("red bold");
assert_eq!(<Style>::from_config(&config).unwrap(), Color::Red.bold());
}
#[test]
fn test_from_hex_color_style() {
let config = Value::from("#00000");
assert_eq!(<Style>::from_config(&config), None);
let config = Value::from("#0000000");
assert_eq!(<Style>::from_config(&config), None);
let config = Value::from("#NOTHEX");
assert_eq!(<Style>::from_config(&config), None);
let config = Value::from("#a12BcD");
assert_eq!(
<Style>::from_config(&config).unwrap(),
Color::RGB(0xA1, 0x2B, 0xCD).into()
);
}
#[test]
fn test_from_vec() {
let config: Value = Value::Array(vec![Value::from("S")]);
assert_eq!(<Vec<&str>>::from_config(&config).unwrap(), vec!["S"]);
}
#[test]
fn test_from_option() {
let config: Value = Value::String(String::from("S"));
assert_eq!(<Option<&str>>::from_config(&config).unwrap(), Some("S"));
}
#[test]
fn table_get_styles_bold_italic_underline_green_dimmed_silly_caps() {
let config = Value::from("bOlD ItAlIc uNdErLiNe GrEeN diMMeD");
let mystyle = <Style>::from_config(&config).unwrap();
assert!(mystyle.is_bold);
assert!(mystyle.is_italic);
assert!(mystyle.is_underline);
assert!(mystyle.is_dimmed);
assert_eq!(
mystyle,
ansi_term::Style::new()
.bold()
.italic()
.underline()
.dimmed()
.fg(Color::Green)
);
}
#[test]
fn table_get_styles_bold_italic_underline_green_dimmed_inverted_silly_caps() {
let config = Value::from("bOlD ItAlIc uNdErLiNe GrEeN diMMeD InVeRTed");
let mystyle = <Style>::from_config(&config).unwrap();
assert!(mystyle.is_bold);
assert!(mystyle.is_italic);
assert!(mystyle.is_underline);
assert!(mystyle.is_dimmed);
assert!(mystyle.is_reverse);
assert_eq!(
mystyle,
ansi_term::Style::new()
.bold()
.italic()
.underline()
.dimmed()
.reverse()
.fg(Color::Green)
);
}
#[test]
fn table_get_styles_plain_and_broken_styles() {
// Test a "plain" style with no formatting
let config = Value::from("");
let plain_style = <Style>::from_config(&config).unwrap();
assert_eq!(plain_style, ansi_term::Style::new());
// Test a string that's clearly broken
let config = Value::from("djklgfhjkldhlhk;j");
assert!(<Style>::from_config(&config).is_none());
// Test a string that's nullified by `none`
let config = Value::from("fg:red bg:green bold none");
assert!(<Style>::from_config(&config).is_none());
// Test a string that's nullified by `none` at the start
let config = Value::from("none fg:red bg:green bold");
assert!(<Style>::from_config(&config).is_none());
}
#[test]
fn table_get_styles_with_none() {
// Test that none on the end will result in None, overriding bg:none
let config = Value::from("fg:red bg:none none");
assert!(<Style>::from_config(&config).is_none());
// Test that none in front will result in None, overriding bg:none
let config = Value::from("none fg:red bg:none");
assert!(<Style>::from_config(&config).is_none());
// Test that none in the middle will result in None, overriding bg:none
let config = Value::from("fg:red none bg:none");
assert!(<Style>::from_config(&config).is_none());
// Test that fg:none will result in None
let config = Value::from("fg:none bg:black");
assert!(<Style>::from_config(&config).is_none());
// Test that bg:none will yield a style
let config = Value::from("fg:red bg:none");
assert_eq!(<Style>::from_config(&config).unwrap(), Color::Red.normal());
// Test that bg:none will yield a style
let config = Value::from("fg:red bg:none bold");
assert_eq!(<Style>::from_config(&config).unwrap(), Color::Red.bold());
// Test that bg:none will overwrite the previous background colour
let config = Value::from("fg:red bg:green bold bg:none");
assert_eq!(<Style>::from_config(&config).unwrap(), Color::Red.bold());
}
#[test]
fn table_get_styles_ordered() {
// Test a background style with inverted order (also test hex + ANSI)
let config = Value::from("bg:#050505 underline fg:120");
let flipped_style = <Style>::from_config(&config).unwrap();
assert_eq!(
flipped_style,
Style::new()
.underline()
.fg(Color::Fixed(120))
.on(Color::RGB(5, 5, 5))
);
// Test that the last color style is always the one used
let config = Value::from("bg:120 bg:125 bg:127 fg:127 122 125");
let multi_style = <Style>::from_config(&config).unwrap();
assert_eq!(
multi_style,
Style::new().fg(Color::Fixed(125)).on(Color::Fixed(127))
);
}
}
| _from_bool() { |
get_status.ts | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { RouteDeps } from '../../types';
import { wrapError } from '../../utils';
import { CasesStatusResponseRt } from '../../../../../common/api';
import { CASE_SAVED_OBJECT } from '../../../../saved_object_types';
export function | ({ caseService, router }: RouteDeps) {
router.get(
{
path: '/api/cases/status',
validate: {},
},
async (context, request, response) => {
try {
const client = context.core.savedObjects.client;
const argsOpenCases = {
client,
options: {
fields: [],
page: 1,
perPage: 1,
filter: `${CASE_SAVED_OBJECT}.attributes.status: open`,
},
};
const argsClosedCases = {
client,
options: {
fields: [],
page: 1,
perPage: 1,
filter: `${CASE_SAVED_OBJECT}.attributes.status: closed`,
},
};
const [openCases, closesCases] = await Promise.all([
caseService.findCases(argsOpenCases),
caseService.findCases(argsClosedCases),
]);
return response.ok({
body: CasesStatusResponseRt.encode({
count_open_cases: openCases.total,
count_closed_cases: closesCases.total,
}),
});
} catch (error) {
return response.customError(wrapError(error));
}
}
);
}
| initGetCasesStatusApi |
struct.go | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package fidlgen_cpp
import (
"encoding/binary"
"fmt"
"strings"
"go.fuchsia.dev/fuchsia/tools/fidl/lib/fidlgen"
)
// These correspond to templated classes forward-declared in
// //src/lib/fidl/include/lib/fidl/cpp/internal/natural_types.h
var (
TypeTraits = internalNs.member("TypeTraits")
)
type Struct struct {
Attributes
fidlgen.Resourceness
nameVariants
AnonymousChildren []ScopedLayout
CodingTableType name
Members []StructMember
PaddingV1 []StructPadding
PaddingV2 []StructPadding
BackingBufferTypeV1 string
BackingBufferTypeV2 string
IsInResult bool
ParametersTupleDecl name
// Full decls needed to check if a type is memcpy compatible.
// Only set if it may be possible for a type to be memcpy compatible,
// e.g. has no padding.
// See the struct template for usage.
FullDeclMemcpyCompatibleDeps []nameVariants
TypeShapeV1 TypeShape
TypeShapeV2 TypeShape
// TypeTraits contains information about a natural domain object.
TypeTraits name
isEmptyStruct bool
isAnonymousRequestOrResponse bool
}
func (*Struct) Kind() declKind {
return Kinds.Struct
}
// AsParameters flattens the struct's members into a parameter list.
func (s *Struct) AsParameters(_ *Type, _ *HandleInformation) []Parameter {
var out []Parameter
for _, sm := range s.Members {
out = append(out, sm.AsParameter())
}
return out
}
// SetInResult marks the struct as being used in as the success variant in a
// method Result, and takes note of the tuple declaration of the result's
// parameters. Because a named struct may be in multiple results, every
// call to this function after the first one per instance is a no-op, since the
// struct would have already been marked with the same information.
func (s *Struct) SetInResult(result *Result) {
if !s.IsInResult {
s.IsInResult = true
s.ParametersTupleDecl = result.ValueTupleDecl
}
}
func (s *Struct) IsEmpty() bool {
return s.isEmptyStruct
}
var _ Kinded = (*Struct)(nil)
var _ namespaced = (*Struct)(nil)
type StructMember struct {
Attributes
nameVariants
Type Type
DefaultValue ConstantValue
OffsetV1 int
OffsetV2 int
HandleInformation *HandleInformation
Constraint string
}
var _ Member = (*StructMember)(nil)
func (sm StructMember) AsParameter() Parameter {
return Parameter{
nameVariants: sm.nameVariants,
Type: sm.Type,
OffsetV1: sm.OffsetV1,
OffsetV2: sm.OffsetV2,
HandleInformation: sm.HandleInformation,
Constraint: sm.Constraint,
}
}
func (sm StructMember) NameAndType() (string, Type) {
return sm.Name(), sm.Type
}
func (sm StructMember) StorageName() string {
return sm.Name() + "_"
}
// NaturalInitializer is an expression in natural types for initializing the
// struct member within its struct field definition. May be empty if we choose
// to delegate to the default constructor of the member type.
func (sm StructMember) NaturalInitializer() string {
var unwrapArray func(ty *Type) string
unwrapArray = func(ty *Type) string {
if ty.IsPrimitiveType() {
// Zero initialize them.
return "{}"
}
if ty.Kind == TypeKinds.Array {
return unwrapArray(ty.ElementType)
}
return ""
}
if !sm.Type.Nullable {
return unwrapArray(&sm.Type)
}
return ""
}
// NaturalPossiblyInvalidDefaultInitializer is an expression in natural types
// for how to default initialize the struct member within its struct field
// definition. May result in an invalid object if it has a strict union
// somewhere.
func (sm StructMember) NaturalPossiblyInvalidDefaultInitializer() string {
if !sm.Type.Nullable {
switch sm.Type.Kind {
case TypeKinds.Array:
return fmt.Sprintf("::fidl::internal::DefaultConstructPossiblyInvalidObject<%s>::Make()", sm.Type.Unified)
case TypeKinds.Struct, TypeKinds.Table, TypeKinds.Union:
return "::fidl::internal::DefaultConstructPossiblyInvalidObjectTag{}"
}
}
return "{}"
}
func (c *compiler) compileStructMember(val fidlgen.StructMember) StructMember {
t := c.compileType(val.Type)
defaultValue := ConstantValue{}
if val.MaybeDefaultValue != nil {
defaultValue = c.compileConstant(*val.MaybeDefaultValue, &t, val.Type)
}
return StructMember{
Attributes: Attributes{val.Attributes},
nameVariants: structMemberContext.transform(val.Name),
Type: t,
DefaultValue: defaultValue,
OffsetV1: val.FieldShapeV1.Offset,
OffsetV2: val.FieldShapeV2.Offset,
HandleInformation: c.fieldHandleInformation(&val.Type),
Constraint: t.FieldConstraint,
}
}
type StructPadding struct {
Offset int
MaskType string
Mask string
}
func toStructPadding(in fidlgen.PaddingMarker) StructPadding {
switch len(in.Mask) {
case 2:
return StructPadding{
Offset: in.Offset,
MaskType: "uint16_t",
Mask: fmt.Sprintf("0x%04x", binary.LittleEndian.Uint16(in.Mask)),
}
case 4:
return StructPadding{
Offset: in.Offset,
MaskType: "uint32_t",
Mask: fmt.Sprintf("0x%08x", binary.LittleEndian.Uint32(in.Mask)),
}
case 8:
return StructPadding{
Offset: in.Offset,
MaskType: "uint64_t",
Mask: fmt.Sprintf("0x%016xull", binary.LittleEndian.Uint64(in.Mask)),
}
default:
panic("unexpected mask size")
}
}
func toStructPaddings(in []fidlgen.PaddingMarker) []StructPadding {
var out []StructPadding
for _, m := range in {
out = append(out, toStructPadding(m))
}
return out
}
func (c *compiler) compileStruct(val fidlgen.Struct) *Struct {
name := c.compileNameVariants(val.Name)
codingTableType := name.Wire.ns.member(c.compileCodingTableType(val.Name))
r := Struct{
Attributes: Attributes{val.Attributes},
AnonymousChildren: c.getAnonymousChildren(val.Layout),
TypeShapeV1: TypeShape{val.TypeShapeV1},
TypeShapeV2: TypeShape{val.TypeShapeV2},
Resourceness: val.Resourceness,
nameVariants: name,
CodingTableType: codingTableType,
Members: []StructMember{},
BackingBufferTypeV1: computeAllocation(
TypeShape{val.TypeShapeV1}.MaxTotalSize(), boundednessBounded).
BackingBufferType(),
BackingBufferTypeV2: computeAllocation( | TypeTraits: TypeTraits.template(name.Unified),
PaddingV1: toStructPaddings(val.BuildPaddingMarkers(fidlgen.WireFormatVersionV1)),
PaddingV2: toStructPaddings(val.BuildPaddingMarkers(fidlgen.WireFormatVersionV2)),
}
for _, v := range val.Members {
r.Members = append(r.Members, c.compileStructMember(v))
}
if len(r.Members) == 0 {
r.isEmptyStruct = true
r.Members = []StructMember{
c.compileStructMember(fidlgen.EmptyStructMember("__reserved")),
}
}
// Construct a deduped list of decls for IsMemcpyCompatible template definitions.
seen := make(map[string]struct{})
for _, member := range r.Members {
if _, ok := seen[member.Type.HLCPP.String()]; ok {
continue
}
seen[member.Type.HLCPP.String()] = struct{}{}
// The dangerous identifiers test package contains identifiers that won't compile.
// e.g. ::fidl::test::dangerous::struct::types::camel::Interface gives an
// "expected unqualified-id" error because of "struct".
// There isn't an easily accessible dangerous identifiers list to replace identifiers.
if strings.Contains(member.Type.HLCPP.String(), "::fidl::test::dangerous::") {
continue
}
r.FullDeclMemcpyCompatibleDeps = append(r.FullDeclMemcpyCompatibleDeps, member.Type.nameVariants)
}
return &r
} | TypeShape{val.TypeShapeV2}.MaxTotalSize(), boundednessBounded).
BackingBufferType(),
IsInResult: false, |
tagsel.py | import re
from .tag import normalize
class BadExpression(Exception):
def __init__(self, txt):
Exception.__init__(self, txt)
def _negate(txt, negated):
return "NOT " + txt if negated else txt
class BinaryExpr():
def __init__(self, lhs=None, operator=None):
self.negated = False
self.lhs = lhs
self.operator = operator
self.rhs = None
def __str__(self):
return _negate("%s %s %s" % (self.lhs, self.operator.upper(), str(self.rhs)), self.negated)
def matches(self, tags):
lhs_matches = self.lhs.matches(tags)
rhs_matches = self.rhs.matches(tags)
combo = bool(lhs_matches and rhs_matches) if self.operator == 'and' else bool(lhs_matches or rhs_matches)
return combo != self.negated
class ValueExpr():
def __init__(self, value=None):
self.negated = False
self.value = value
def __str__(self):
return _negate(self.value, self.negated)
def matches(self, tags):
return (self.value in tags) != self.negated
class GroupedExpr():
def __init__(self, expr):
self.negated = False
self.expr = expr
def __str__(self):
return _negate('(%s)' % str(self.expr), self.negated)
def matches(self, tags):
return self.expr.matches(tags) != self.negated
"""Parser expects an expression; will be satisfied by group or tag, will allow 'not'."""
NEED_EXPR = 0
"""Parser has an expression; will only allow 'and' or 'or' to extend."""
EXTENDING_EXPR = 1
"""Parser is waiting while a parenthesized subexpression is being parsed."""
IN_PARENS = 2
class Parser:
def __init__(self, parent = None):
self.parent = parent
self.expr = None
self.negate_next = False
self.subparser = None
self._transition_to(NEED_EXPR)
def _assign_expr(self, expr):
self.expr = expr
self.expr.negated = self.negate_next
self.negate_next = False
@property
def trailing_rhs(self):
x = self.expr
while isinstance(x, BinaryExpr):
if x.rhs is None:
|
x = x.rhs
def _update_expr(self, expr=None, operator=None):
"""
React to newly parsed info about the current expression.
"""
if self.expr is None:
self._assign_expr(expr)
else:
if isinstance(self.expr, ValueExpr) or isinstance(self.expr, GroupedExpr):
self._assign_expr(BinaryExpr(self.expr, operator))
elif isinstance(self.expr, BinaryExpr):
trailing_rhs = self.trailing_rhs
if trailing_rhs:
trailing_rhs.rhs = expr
trailing_rhs.rhs.negated = self.negate_next
self.negate_next = False
else:
# We have to compare the old binary operator to the new one. If new is
# 'and' and old is 'or', then we need to rebind to honor the greater
# precedence of 'and'. Example:
# #a or #b and #c
# ^------^________initial grouping into BinaryExpr
# ^______should cause #b to rebind with #c
if operator == 'and' and self.expr.operator == 'or':
self.expr.rhs = BinaryExpr(self.expr.rhs, operator)
else:
self._assign_expr(BinaryExpr(self.expr, operator))
def _transition_to(self, new_state):
self.state = new_state
if new_state == NEED_EXPR:
self.on_token_func = self._on_while_need
elif new_state == EXTENDING_EXPR:
self.on_token_func = self._on_while_ext
elif new_state == IN_PARENS:
self.on_token_func = self._on_while_in_parens
else:
assert False == 'implemented'
def on_token(self, token):
"""
Process another token.
"""
if self.subparser:
self.subparser.on_token(token)
else:
self.on_token_func(token)
def _on_while_in_parens(self, token):
"""
Handle token in the IN_PARENS state.
"""
if token == ')':
self.update_expr(self.subparser.expr)
self.subparser.parent = None
self.subparser = None
self._transition_to(EXTENDING_EXPR)
else:
raise BadExpression('Unexpected token "%s" when expecting open or close paren.' % token)
def _on_while_need(self, token):
"""
Handle token in the NEED_EXPR state (no meaningful content seen yet).
"""
if token == '(':
# Handle grouping by recursing and passing parsing responsibility to
# a parser for the subexpression.
self.subparser = Parser(self)
self._transition_to(IN_PARENS)
elif token == 'not':
self.negate_next = not self.negate_next
elif token.startswith('#'):
token = normalize(token)
if len(token) == 1:
raise BadExpression('Invalid tag.')
self._update_expr(ValueExpr(token))
self._transition_to(EXTENDING_EXPR)
else:
raise BadExpression('Unexpected token "%s".' % token)
def _on_while_ext(self, token):
"""
Handle token in the EXTEND_EXPR state.
"""
if token in ['and', 'or']:
self._update_expr(operator=token)
self._transition_to(NEED_EXPR)
elif token == ')':
p = self.parent
if not p:
raise BadExpression("Unexpected close paren.")
p._update_expr(GroupedExpr(self.expr))
# Do I have to transfer negation by checking whether internal and external negation are different?
p.subparser = self.parent = None
p._transition_to(EXTENDING_EXPR)
else:
raise BadExpression('Unexpected token "%s".' % token)
_TOKEN_SEPARATOR = re.compile('[ \t\r\n]+')
def parse(expr):
# Make sure operators are lower case and that parens are
# separated from any tokens they touch.
expr = expr.lower().replace('(', ' ( ').replace(')', ' ) ').strip()
tokens = _TOKEN_SEPARATOR.split(expr)
parser = Parser()
for token in tokens:
parser.on_token(token)
if parser.state == IN_PARENS:
raise BadExpression("Missing close paren.")
elif parser.state != EXTENDING_EXPR:
raise BadExpression("Incomplete expression.")
return parser.expr
| return x |
application_server_info.py | # -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
import cohesity_management_sdk.models.exchange_database_copy_info
import cohesity_management_sdk.models.exchange_database_info
class ApplicationServerInfo(object):
"""Implementation of the 'ApplicationServerInfo' model.
Specifies the Information about the Exchange Server Node.
Attributes:
database_copy_info_list (list of ExchangeDatabaseCopyInfo): Specifies | node. This is populated for the Standlone Exchange Servers.
fqdn (string): Specifies the fully qualified domain name of the
Exchange Server.
guid (string): Specifies the Guid of the Exchange Application Server.
name (string): Specifies the display name of the Exchange
Application Server.
total_size_bytes (int): Specifies the total size of all Exchange
database copies in all the Exchange Application Servers that are
part of the DAG.
"""
# Create a mapping from Model property names to API property names
_names = {
"database_copy_info_list": 'databaseCopyInfoList',
"database_info_list":'databaseInfoList',
"fqdn": 'fqdn',
"guid": 'guid',
"name": 'name',
"total_size_bytes":'totalSizeBytes'
}
def __init__(self,
database_copy_info_list=None,
database_info_list=None,
fqdn=None,
guid=None,
name=None,
total_size_bytes=None):
"""Constructor for the ApplicationServerInfo class"""
# Initialize members of the class
self.database_copy_info_list = database_copy_info_list
self.database_info_list = database_info_list
self.fqdn = fqdn
self.guid = guid
self.name = name
self.total_size_bytes = total_size_bytes
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
database_copy_info_list = None
if dictionary.get('databaseCopyInfoList') != None:
database_copy_info_list = list()
for structure in dictionary.get('databaseCopyInfoList'):
database_copy_info_list.append(cohesity_management_sdk.models.exchange_database_copy_info.ExchangeDatabaseCopyInfo.from_dictionary(structure))
database_info_list = None
if dictionary.get('databaseInfoList') != None:
database_info_list = list()
for structure in dictionary.get('databaseInfoList'):
database_info_list.append(cohesity_management_sdk.models.exchange_database_info.ExchangeDatabaseInfo.from_dictionary(structure))
fqdn = dictionary.get('fqdn')
guid = dictionary.get('guid')
name = dictionary.get('name')
total_size_bytes = dictionary.get('totalSizeBytes')
# Return an object of this model
return cls(database_copy_info_list,
database_info_list,
fqdn,
guid,
name,
total_size_bytes) | the list of all the copies of the Exchange databases(that are part
of DAG) that are present on this Exchange Node.
database_info_list (list of ExchangeDatabaseInfo): Specifies the list
of all the databases available on the standalone Exchange server |
global_config.py | from django.conf import settings
def global_settings(request):
| """ Return custom constant global variables to be
used widely for all of our apps. """
# Current user logged in info
cur_user_id = 0
cur_user_name = ''
cur_user_full_name = ''
if request.user.is_authenticated:
# Get user info
cur_user_id = request.user.id
cur_user_name = request.user.username
cur_user_full_name = request.user.first_name + " " + request.user.last_name
return{
'BASE_URL': settings.BASE_URL,
'SITE_SHORT_NAME': settings.SITE_SHORT_NAME,
'SITE_FULL_NAME': settings.SITE_FULL_NAME,
'SITE_YEAR_STARTED': settings.SITE_YEAR_STARTED,
'SITE_URL_HOME': settings.SITE_URL_HOME,
'SITE_SLOGAN': settings.SITE_SLOGAN,
'SITE_CONTACT_US': settings.SITE_CONTACT_US,
'MIN_CHARS_SEARCH': settings.MIN_CHARS_SEARCH,
'APP_URL_TOP_LOGO': settings.APP_URL_TOP_LOGO,
'GRECAP_SITE_KEY': settings.GRECAP_SITE_KEY,
'DEFAULT_AVATAR': settings.DEFAULT_AVATAR,
'CUR_USER_ID': cur_user_id,
'CUR_USER_name': cur_user_name,
'CUR_USER_full_name': cur_user_full_name.strip(),
} |
|
make_mode.py | # -*- coding: utf-8 -*-
"""
sphinx.make_mode
~~~~~~~~~~~~~~~~
sphinx-build -M command-line handling.
This replaces the old, platform-dependent and once-generated content
of Makefile / make.bat.
This is in its own module so that importing it is fast. It should not
import the main Sphinx modules (like sphinx.applications, sphinx.builders).
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
import subprocess
import sys
from os import path
import sphinx
from sphinx import cmdline
from sphinx.util.console import color_terminal, nocolor, bold, blue # type: ignore
from sphinx.util.osutil import cd, rmtree
if False:
# For type annotation
from typing import List # NOQA
proj_name = os.getenv('SPHINXPROJ', '<project>')
BUILDERS = [
("", "html", "to make standalone HTML files"),
("", "dirhtml", "to make HTML files named index.html in directories"),
("", "singlehtml", "to make a single large HTML file"),
("", "pickle", "to make pickle files"),
("", "json", "to make JSON files"),
("", "htmlhelp", "to make HTML files and an HTML help project"),
("", "qthelp", "to make HTML files and a qthelp project"),
("", "devhelp", "to make HTML files and a Devhelp project"),
("", "epub", "to make an epub"),
("", "latex", "to make LaTeX files, you can set PAPER=a4 or PAPER=letter"),
("posix", "latexpdf", "to make LaTeX and PDF files (default pdflatex)"),
("posix", "latexpdfja", "to make LaTeX files and run them through platex/dvipdfmx"),
("", "text", "to make text files"),
("", "man", "to make manual pages"),
("", "texinfo", "to make Texinfo files"),
("posix", "info", "to make Texinfo files and run them through makeinfo"),
("", "gettext", "to make PO message catalogs"),
("", "changes", "to make an overview of all changed/added/deprecated items"),
("", "xml", "to make Docutils-native XML files"),
("", "pseudoxml", "to make pseudoxml-XML files for display purposes"),
("", "linkcheck", "to check all external links for integrity"),
("", "doctest", "to run all doctests embedded in the documentation "
"(if enabled)"),
("", "coverage", "to run coverage check of the documentation (if enabled)"),
]
class Make(object):
def __init__(self, srcdir, builddir, opts):
# type: (unicode, unicode, List[unicode]) -> None
self.srcdir = srcdir
self.builddir = builddir
self.opts = opts
self.makecmd = os.environ.get('MAKE', 'make') # refer $MAKE to determine make command
def builddir_join(self, *comps):
# type: (unicode) -> unicode
return path.join(self.builddir, *comps)
def build_clean(self):
# type: () -> int
if not path.exists(self.builddir):
return 0
elif not path.isdir(self.builddir):
print("Error: %r is not a directory!" % self.builddir)
return 1
print("Removing everything under %r..." % self.builddir)
for item in os.listdir(self.builddir):
rmtree(self.builddir_join(item))
return 0
def build_help(self):
# type: () -> None
if not color_terminal():
nocolor()
print(bold("Sphinx v%s" % sphinx.__display_version__))
print("Please use `make %s' where %s is one of" % ((blue('target'),) * 2)) # type: ignore # NOQA
for osname, bname, description in BUILDERS:
if not osname or os.name == osname:
print(' %s %s' % (blue(bname.ljust(10)), description))
def | (self):
# type: () -> int
if self.run_generic_build('latex') > 0:
return 1
try:
with cd(self.builddir_join('latex')):
return subprocess.call([self.makecmd, 'all-pdf'])
except OSError:
print('Error: Failed to run: %s' % self.makecmd)
return 1
def build_latexpdfja(self):
# type: () -> int
if self.run_generic_build('latex') > 0:
return 1
try:
with cd(self.builddir_join('latex')):
return subprocess.call([self.makecmd, 'all-pdf-ja'])
except OSError:
print('Error: Failed to run: %s' % self.makecmd)
return 1
def build_info(self):
# type: () -> int
if self.run_generic_build('texinfo') > 0:
return 1
try:
with cd(self.builddir_join('texinfo')):
return subprocess.call([self.makecmd, 'info'])
except OSError:
print('Error: Failed to run: %s' % self.makecmd)
return 1
def build_gettext(self):
# type: () -> int
dtdir = self.builddir_join('gettext', '.doctrees')
if self.run_generic_build('gettext', doctreedir=dtdir) > 0:
return 1
return 0
def run_generic_build(self, builder, doctreedir=None):
# type: (unicode, unicode) -> int
# compatibility with old Makefile
papersize = os.getenv('PAPER', '')
opts = self.opts
if papersize in ('a4', 'letter'):
opts.extend(['-D', 'latex_elements.papersize=' + papersize + 'paper'])
if doctreedir is None:
doctreedir = self.builddir_join('doctrees')
args = ['-b', builder,
'-d', doctreedir,
self.srcdir,
self.builddir_join(builder)]
return cmdline.main(args + opts)
def run_make_mode(args):
# type: (List[unicode]) -> int
if len(args) < 3:
print('Error: at least 3 arguments (builder, source '
'dir, build dir) are required.', file=sys.stderr)
return 1
make = Make(args[1], args[2], args[3:])
run_method = 'build_' + args[0]
if hasattr(make, run_method):
return getattr(make, run_method)()
return make.run_generic_build(args[0])
| build_latexpdf |
306919cb.a387d9db.js | (window.webpackJsonp=window.webpackJsonp||[]).push([[64],{123:function(n,e,t){"use strict";t.r(e),t.d(e,"frontMatter",(function(){return i})),t.d(e,"metadata",(function(){return r})),t.d(e,"rightToc",(function(){return l})),t.d(e,"default",(function(){return c}));var o=t(2),a=t(6),s=(t(0),t(422)),i={id:"configuration",title:"Configuration"},r={unversionedId:"reference/configuration",id:"reference/configuration",isDocsHomePage:!1,title:"Configuration",description:"\x3c!-- THIS FILE IS BEING AUTO-GENERATED. DO NOT MODIFY IT AS ALL CHANGES WILL BE OVERWRITTEN.",source:"@site/docs/reference/configuration.md",slug:"/reference/configuration",permalink:"/kratos/docs/next/reference/configuration",editUrl:"https://github.com/ory/kratos/edit/master/docs/docs/reference/configuration.md",version:"current",lastUpdatedBy:"aeneasr",lastUpdatedAt:1603806907,sidebar:"docs",previous:{title:"Setting up Password Hashing Parameters",permalink:"/kratos/docs/next/guides/setting-up-password-hashing-parameters"},next:{title:"JSON Schema and JSON Paths",permalink:"/kratos/docs/next/reference/json-schema-json-paths"}},l=[],u={rightToc:l};function c(n){var e=n.components,t=Object(a.a)(n,["components"]);return Object(s.b)("wrapper",Object(o.a)({},u,t,{components:e,mdxType:"MDXLayout"}),Object(s.b)("p",null,"If file ",Object(s.b)("inlineCode",{parentName:"p"},"$HOME/.kratos.yaml")," exists, it will be used as a configuration file which supports all\nconfiguration settings listed below."),Object(s.b)("p",null,"You can load the config file from another source using the ",Object(s.b)("inlineCode",{parentName:"p"},"-c path/to/config.yaml")," or ",Object(s.b)("inlineCode",{parentName:"p"},"--config path/to/config.yaml"),"\nflag: ",Object(s.b)("inlineCode",{parentName:"p"},"kratos --config path/to/config.yaml"),"."),Object(s.b)("p",null,"Config files can be formatted as JSON, YAML and TOML. Some configuration values support reloading without server restart.\nAll configuration values can be set using environment variables, as documented below."),Object(s.b)("p",null,"To find out more about edge cases like setting string array values through environmental variables head to the\n",Object(s.b)("a",Object(o.a)({parentName:"p"},{href:"https://www.ory.sh/docs/ecosystem/configuring"}),"Configuring ORY services")," section."),Object(s.b)("pre",null,Object(s.b)("code",Object(o.a)({parentName:"pre"},{className:"language-yaml"}),"## ORY Kratos Configuration\n#\n\n\n## identity ##\n#\nidentity:\n \n ## JSON Schema URL for default identity traits ##\n #\n # Path to the JSON Schema which describes a default identity's traits.\n #\n # Examples:\n # - file://path/to/identity.traits.schema.json\n # - https://foo.bar.com/path/to/identity.traits.schema.json\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export IDENTITY_DEFAULT_SCHEMA_URL=<value>\n # - Windows Command Line (CMD):\n # > set IDENTITY_DEFAULT_SCHEMA_URL=<value>\n #\n default_schema_url: file://path/to/identity.traits.schema.json\n\n ## Additional JSON Schemas for Identity Traits ##\n #\n # Examples:\n # - - id: customer\n # url: https://foo.bar.com/path/to/customer.traits.schema.json\n # - id: employee\n # url: https://foo.bar.com/path/to/employee.traits.schema.json\n # - id: employee-v2\n # url: https://foo.bar.com/path/to/employee.v2.traits.schema.json\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export IDENTITY_SCHEMAS=<value>\n # - Windows Command Line (CMD):\n # > set IDENTITY_SCHEMAS=<value>\n #\n schemas:\n - id: customer\n url: https://foo.bar.com/path/to/customer.traits.schema.json\n - id: employee\n url: https://foo.bar.com/path/to/employee.traits.schema.json\n - id: employee-v2\n url: https://foo.bar.com/path/to/employee.v2.traits.schema.json\n\n## Data Source Name ##\n#\n# DSN is used to specify the database credentials as a connection URI.\n#\n# Examples:\n# - \"postgres://user:\n# password@postgresd:5432/database?sslmode=disable&max_conns=20&max_idle_conns=\\\n# 4\"\n# - mysql://user:secret@tcp(mysqld:3306)/database?max_conns=20&max_idle_conns=4\n# - cockroach://user@cockroachdb:26257/database?sslmode=disable&max_conns=20&max_idle_conns=4\n# - sqlite:///var/lib/sqlite/db.sqlite?_fk=true&mode=rwc\n# \n# Set this value using environment variables on\n# - Linux/macOS:\n# $ export DSN=<value>\n# - Windows Command Line (CMD):\n# > set DSN=<value>\n#\ndsn: \"postgres://user:\n password@postgresd:5432/database?sslmode=disable&max_conns=20&max_idle_conns=\\\n 4\"\n\n## selfservice ##\n#\nselfservice:\n \n ## Redirect browsers to set URL per default ##\n #\n # ORY Kratos redirects to this URL per default on completion of self-service flows and other browser interaction. Read this [article for more information on browser redirects](https://www.ory.sh/kratos/docs/concepts/browser-redirect-flow-completion).\n #\n # Examples:\n # - https://my-app.com/dashboard\n # - /dashboard\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_DEFAULT_BROWSER_RETURN_URL=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_DEFAULT_BROWSER_RETURN_URL=<value>\n #\n default_browser_return_url: /dashboard\n\n ## Whitelisted Return To URLs ##\n #\n # List of URLs that are allowed to be redirected to. A redirection request is made by appending `?return_to=...` to Login, Registration, and other self-service flows.\n #\n # Examples:\n # - - https://app.my-app.com/dashboard\n # - /dashboard\n # - https://www.my-app.com/\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_WHITELISTED_RETURN_URLS=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_WHITELISTED_RETURN_URLS=<value>\n #\n whitelisted_return_urls:\n - https://app.my-app.com/dashboard\n - /dashboard\n - https://www.my-app.com/\n\n ## flows ##\n #\n flows:\n \n ## settings ##\n #\n settings:\n \n ## URL of the Settings page. ##\n #\n # URL where the Settings UI is hosted. Check the [reference implementation](https://github.com/ory/kratos-selfservice-ui-node).\n #\n # Default value: https://www.ory.sh/kratos/docs/fallback/settings\n #\n # Examples:\n # - https://my-app.com/user/settings\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_SETTINGS_UI_URL=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_SETTINGS_UI_URL=<value>\n #\n ui_url: https://www.ory.sh/kratos/docs/fallback/settings\n\n ## lifespan ##\n #\n # Default value: 1h\n #\n # Examples:\n # - 1h\n # - 1m\n # - 1s\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_SETTINGS_LIFESPAN=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_SETTINGS_LIFESPAN=<value>\n #\n lifespan: 1s\n\n ## privileged_session_max_age ##\n #\n # Default value: 1h\n #\n # Examples:\n # - 1h\n # - 1m\n # - 1s\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_SETTINGS_PRIVILEGED_SESSION_MAX_AGE=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_SETTINGS_PRIVILEGED_SESSION_MAX_AGE=<value>\n #\n privileged_session_max_age: 1s\n\n ## after ##\n #\n after:\n \n ## Redirect browsers to set URL per default ##\n #\n # ORY Kratos redirects to this URL per default on completion of self-service flows and other browser interaction. Read this [article for more information on browser redirects](https://www.ory.sh/kratos/docs/concepts/browser-redirect-flow-completion).\n #\n # Examples:\n # - https://my-app.com/dashboard\n # - /dashboard\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_SETTINGS_AFTER_DEFAULT_BROWSER_RETURN_URL=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_SETTINGS_AFTER_DEFAULT_BROWSER_RETURN_URL=<value>\n #\n default_browser_return_url: https://my-app.com/dashboard\n\n ## password ##\n #\n password:\n \n ## Redirect browsers to set URL per default ##\n #\n # ORY Kratos redirects to this URL per default on completion of self-service flows and other browser interaction. Read this [article for more information on browser redirects](https://www.ory.sh/kratos/docs/concepts/browser-redirect-flow-completion).\n #\n # Examples:\n # - https://my-app.com/dashboard\n # - /dashboard\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_SETTINGS_AFTER_PASSWORD_DEFAULT_BROWSER_RETURN_URL=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_SETTINGS_AFTER_PASSWORD_DEFAULT_BROWSER_RETURN_URL=<value>\n #\n default_browser_return_url: https://my-app.com/dashboard\n\n ## hooks ##\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_SETTINGS_AFTER_PASSWORD_HOOKS=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_SETTINGS_AFTER_PASSWORD_HOOKS=<value>\n #\n hooks:\n - hook: verify\n\n ## profile ##\n #\n profile:\n \n ## Redirect browsers to set URL per default ##\n #\n # ORY Kratos redirects to this URL per default on completion of self-service flows and other browser interaction. Read this [article for more information on browser redirects](https://www.ory.sh/kratos/docs/concepts/browser-redirect-flow-completion).\n #\n # Examples:\n # - https://my-app.com/dashboard\n # - /dashboard\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_SETTINGS_AFTER_PROFILE_DEFAULT_BROWSER_RETURN_URL=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_SETTINGS_AFTER_PROFILE_DEFAULT_BROWSER_RETURN_URL=<value>\n #\n default_browser_return_url: /dashboard\n\n ## hooks ##\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_SETTINGS_AFTER_PROFILE_HOOKS=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_SETTINGS_AFTER_PROFILE_HOOKS=<value>\n #\n hooks:\n - hook: verify\n\n ## logout ##\n #\n logout:\n \n ## after ##\n #\n after:\n \n ## Redirect browsers to set URL per default ##\n #\n # ORY Kratos redirects to this URL per default on completion of self-service flows and other browser interaction. Read this [article for more information on browser redirects](https://www.ory.sh/kratos/docs/concepts/browser-redirect-flow-completion).\n #\n # Examples:\n # - https://my-app.com/dashboard\n # - /dashboard\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_LOGOUT_AFTER_DEFAULT_BROWSER_RETURN_URL=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_LOGOUT_AFTER_DEFAULT_BROWSER_RETURN_URL=<value>\n #\n default_browser_return_url: /dashboard\n\n ## registration ##\n #\n registration:\n \n ## Registration UI URL ##\n #\n # URL where the Registration UI is hosted. Check the [reference implementation](https://github.com/ory/kratos-selfservice-ui-node).\n #\n # Default value: https://www.ory.sh/kratos/docs/fallback/registration\n #\n # Examples:\n # - https://my-app.com/signup\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_REGISTRATION_UI_URL=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_REGISTRATION_UI_URL=<value>\n #\n ui_url: https://www.ory.sh/kratos/docs/fallback/registration\n\n ## lifespan ##\n #\n # Default value: 1h\n #\n # Examples:\n # - 1h\n # - 1m\n # - 1s\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_REGISTRATION_LIFESPAN=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_REGISTRATION_LIFESPAN=<value>\n #\n lifespan: 1s\n\n ## after ##\n #\n after:\n \n ## Redirect browsers to set URL per default ##\n #\n # ORY Kratos redirects to this URL per default on completion of self-service flows and other browser interaction. Read this [article for more information on browser redirects](https://www.ory.sh/kratos/docs/concepts/browser-redirect-flow-completion).\n #\n # Examples:\n # - https://my-app.com/dashboard\n # - /dashboard\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_REGISTRATION_AFTER_DEFAULT_BROWSER_RETURN_URL=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_REGISTRATION_AFTER_DEFAULT_BROWSER_RETURN_URL=<value>\n #\n default_browser_return_url: /dashboard\n\n ## password ##\n #\n password:\n \n ## Redirect browsers to set URL per default ##\n #\n # ORY Kratos redirects to this URL per default on completion of self-service flows and other browser interaction. Read this [article for more information on browser redirects](https://www.ory.sh/kratos/docs/concepts/browser-redirect-flow-completion).\n #\n # Examples:\n # - https://my-app.com/dashboard\n # - /dashboard\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_REGISTRATION_AFTER_PASSWORD_DEFAULT_BROWSER_RETURN_URL=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_REGISTRATION_AFTER_PASSWORD_DEFAULT_BROWSER_RETURN_URL=<value>\n #\n default_browser_return_url: https://my-app.com/dashboard\n\n ## hooks ##\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_REGISTRATION_AFTER_PASSWORD_HOOKS=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_REGISTRATION_AFTER_PASSWORD_HOOKS=<value>\n #\n hooks:\n - hook: session\n\n ## oidc ##\n #\n oidc:\n \n ## Redirect browsers to set URL per default ##\n #\n # ORY Kratos redirects to this URL per default on completion of self-service flows and other browser interaction. Read this [article for more information on browser redirects](https://www.ory.sh/kratos/docs/concepts/browser-redirect-flow-completion).\n #\n # Examples:\n # - https://my-app.com/dashboard\n # - /dashboard\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_REGISTRATION_AFTER_OIDC_DEFAULT_BROWSER_RETURN_URL=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_REGISTRATION_AFTER_OIDC_DEFAULT_BROWSER_RETURN_URL=<value>\n #\n default_browser_return_url: https://my-app.com/dashboard\n\n ## hooks ##\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_REGISTRATION_AFTER_OIDC_HOOKS=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_REGISTRATION_AFTER_OIDC_HOOKS=<value>\n #\n hooks:\n - hook: session\n\n ## login ##\n #\n login:\n \n ## Login UI URL ##\n #\n # URL where the Login UI is hosted. Check the [reference implementation](https://github.com/ory/kratos-selfservice-ui-node).\n #\n # Default value: https://www.ory.sh/kratos/docs/fallback/login\n #\n # Examples:\n # - https://my-app.com/login\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_LOGIN_UI_URL=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_LOGIN_UI_URL=<value>\n #\n ui_url: https://my-app.com/login\n\n ## lifespan ##\n #\n # Default value: 1h\n #\n # Examples:\n # - 1h\n # - 1m\n # - 1s\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_LOGIN_LIFESPAN=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_LOGIN_LIFESPAN=<value>\n #\n lifespan: 1m\n\n ## after ##\n #\n after:\n \n ## Redirect browsers to set URL per default ##\n #\n # ORY Kratos redirects to this URL per default on completion of self-service flows and other browser interaction. Read this [article for more information on browser redirects](https://www.ory.sh/kratos/docs/concepts/browser-redirect-flow-completion).\n #\n # Examples:\n # - https://my-app.com/dashboard\n # - /dashboard\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_LOGIN_AFTER_DEFAULT_BROWSER_RETURN_URL=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_LOGIN_AFTER_DEFAULT_BROWSER_RETURN_URL=<value>\n #\n default_browser_return_url: /dashboard\n\n ## password ##\n #\n password:\n \n ## Redirect browsers to set URL per default ##\n #\n # ORY Kratos redirects to this URL per default on completion of self-service flows and other browser interaction. Read this [article for more information on browser redirects](https://www.ory.sh/kratos/docs/concepts/browser-redirect-flow-completion).\n #\n # Examples:\n # - https://my-app.com/dashboard\n # - /dashboard\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_LOGIN_AFTER_PASSWORD_DEFAULT_BROWSER_RETURN_URL=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_LOGIN_AFTER_PASSWORD_DEFAULT_BROWSER_RETURN_URL=<value>\n #\n default_browser_return_url: /dashboard\n\n ## hooks ##\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_LOGIN_AFTER_PASSWORD_HOOKS=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_LOGIN_AFTER_PASSWORD_HOOKS=<value>\n #\n hooks:\n - hook: revoke_active_sessions\n\n ## oidc ##\n #\n oidc:\n \n ## Redirect browsers to set URL per default ##\n #\n # ORY Kratos redirects to this URL per default on completion of self-service flows and other browser interaction. Read this [article for more information on browser redirects](https://www.ory.sh/kratos/docs/concepts/browser-redirect-flow-completion).\n #\n # Examples:\n # - https://my-app.com/dashboard\n # - /dashboard\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_LOGIN_AFTER_OIDC_DEFAULT_BROWSER_RETURN_URL=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_LOGIN_AFTER_OIDC_DEFAULT_BROWSER_RETURN_URL=<value>\n #\n default_browser_return_url: https://my-app.com/dashboard\n\n ## hooks ##\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_LOGIN_AFTER_OIDC_HOOKS=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_LOGIN_AFTER_OIDC_HOOKS=<value>\n #\n hooks:\n - hook: revoke_active_sessions\n\n ## Email and Phone Verification and Account Activation Configuration ##\n #\n verification:\n \n ## Enable Email/Phone Verification ##\n #\n # If set to true will enable [Email and Phone Verification and Account Activation](https://www.ory.sh/kratos/docs/self-service/flows/verify-email-account-activation/).\n #\n # Default value: false\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_VERIFICATION_ENABLED=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_VERIFICATION_ENABLED=<value>\n #\n enabled: false\n\n ## Verify UI URL ##\n #\n # URL where the ORY Verify UI is hosted. This is the page where users activate and / or verify their email or telephone number. Check the [reference implementation](https://github.com/ory/kratos-selfservice-ui-node).\n #\n # Default value: https://www.ory.sh/kratos/docs/fallback/verification\n #\n # Examples:\n # - https://my-app.com/verify\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_VERIFICATION_UI_URL=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_VERIFICATION_UI_URL=<value>\n #\n ui_url: https://www.ory.sh/kratos/docs/fallback/verification\n\n ## after ##\n #\n after:\n \n ## Redirect browsers to set URL per default ##\n #\n # ORY Kratos redirects to this URL per default on completion of self-service flows and other browser interaction. Read this [article for more information on browser redirects](https://www.ory.sh/kratos/docs/concepts/browser-redirect-flow-completion).\n #\n # Examples:\n # - https://my-app.com/dashboard\n # - /dashboard\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_VERIFICATION_AFTER_DEFAULT_BROWSER_RETURN_URL=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_VERIFICATION_AFTER_DEFAULT_BROWSER_RETURN_URL=<value>\n #\n default_browser_return_url: https://my-app.com/dashboard\n\n ## Self-Service Verification Request Lifespan ##\n #\n # Sets how long the verification request (for the UI interaction) is valid.\n #\n # Default value: 1h\n #\n # Examples:\n # - 1h\n # - 1m\n # - 1s\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_VERIFICATION_LIFESPAN=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_VERIFICATION_LIFESPAN=<value>\n #\n lifespan: 1h\n\n ## Account Recovery Configuration ##\n #\n recovery:\n \n ## Enable Account Recovery ##\n #\n # If set to true will enable [Account Recovery](https://www.ory.sh/kratos/docs/self-service/flows/password-reset-account-recovery/).\n #\n # Default value: false\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_RECOVERY_ENABLED=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_RECOVERY_ENABLED=<value>\n #\n enabled: false\n\n ## Recovery UI URL ##\n #\n # URL where the ORY Recovery UI is hosted. This is the page where users request and complete account recovery. Check the [reference implementation](https://github.com/ory/kratos-selfservice-ui-node).\n #\n # Default value: https://www.ory.sh/kratos/docs/fallback/recovery\n #\n # Examples:\n # - https://my-app.com/verify\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_RECOVERY_UI_URL=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_RECOVERY_UI_URL=<value>\n #\n ui_url: https://www.ory.sh/kratos/docs/fallback/recovery\n\n ## after ##\n #\n after:\n \n ## Redirect browsers to set URL per default ##\n #\n # ORY Kratos redirects to this URL per default on completion of self-service flows and other browser interaction. Read this [article for more information on browser redirects](https://www.ory.sh/kratos/docs/concepts/browser-redirect-flow-completion).\n #\n # Examples:\n # - https://my-app.com/dashboard\n # - /dashboard\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_RECOVERY_AFTER_DEFAULT_BROWSER_RETURN_URL=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_RECOVERY_AFTER_DEFAULT_BROWSER_RETURN_URL=<value>\n #\n default_browser_return_url: /dashboard\n\n ## Self-Service Recovery Request Lifespan ##\n #\n # Sets how long the recovery request is valid. If expired, the user has to redo the flow.\n #\n # Default value: 1h\n #\n # Examples:\n # - 1h\n # - 1m\n # - 1s\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_RECOVERY_LIFESPAN=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_RECOVERY_LIFESPAN=<value>\n #\n lifespan: 1h\n\n ## error ##\n #\n error:\n \n ## ORY Kratos Error UI URL ##\n #\n # URL where the ORY Kratos Error UI is hosted. Check the [reference implementation](https://github.com/ory/kratos-selfservice-ui-node).\n #\n # Default value: https://www.ory.sh/kratos/docs/fallback/error\n #\n # Examples:\n # - https://my-app.com/kratos-error\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_FLOWS_ERROR_UI_URL=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_FLOWS_ERROR_UI_URL=<value>\n #\n ui_url: https://my-app.com/kratos-error\n\n ## methods ##\n #\n methods:\n \n ## profile ##\n #\n profile:\n \n ## Enables Profile Management Method ##\n #\n # Default value: true\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_METHODS_PROFILE_ENABLED=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_METHODS_PROFILE_ENABLED=<value>\n #\n enabled: true\n\n ## link ##\n #\n link:\n \n ## Enables Link Method ##\n #\n # Default value: true\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_METHODS_LINK_ENABLED=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_METHODS_LINK_ENABLED=<value>\n #\n enabled: false\n\n ## password ##\n #\n password:\n \n ## Enables Username/Email and Password Method ##\n #\n # Default value: true\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_METHODS_PASSWORD_ENABLED=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_METHODS_PASSWORD_ENABLED=<value>\n #\n enabled: false\n\n ## oidc ##\n #\n oidc:\n \n ## Enables OpenID Connect Method ##\n #\n # Default value: false\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_METHODS_OIDC_ENABLED=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_METHODS_OIDC_ENABLED=<value>\n #\n enabled: true\n\n ## config ##\n #\n config:\n \n ## OpenID Connect and OAuth2 Providers ##\n #\n # A list and configuration of OAuth2 and OpenID Connect providers ORY Kratos should integrate with.\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SELFSERVICE_METHODS_OIDC_CONFIG_PROVIDERS=<value>\n # - Windows Command Line (CMD):\n # > set SELFSERVICE_METHODS_OIDC_CONFIG_PROVIDERS=<value>\n #\n providers:\n - id: google\n provider: google\n client_id: magna dolor\n client_secret: nulla\n mapper_url: file://path/to/oidc.jsonnet\n issuer_url: https://accounts.google.com\n auth_url: https://accounts.google.com/o/oauth2/v2/auth\n token_url: https://www.googleapis.com/oauth2/v4/token\n scope:\n - offline_access\n - offline_access\n - profile\n - offline_access\n - offline_access\n tenant: 8eaef023-2b34-4da1-9baa-8bc8c9d6a490\n requested_claims:\n &a2\n id_token:\n ? email\n ? email_verified\n - id: google\n provider: google\n client_id: sint proident dolore labore est\n client_secret: cupidatat do\n mapper_url: file://path/to/oidc.jsonnet\n issuer_url: https://accounts.google.com\n auth_url: https://accounts.google.com/o/oauth2/v2/auth\n token_url: https://www.googleapis.com/oauth2/v4/token\n scope:\n - profile\n - offline_access\n tenant: consumers\n requested_claims:\n &a1\n userinfo:\n given_name:\n essential: true\n nickname: null\n email:\n essential: true\n email_verified:\n essential: true\n picture: null\n http://example.info/claims/groups: null\n id_token:\n auth_time:\n essential: true\n acr:\n values:\n - urn:mace:incommon:iap:silver\n - id: google\n provider: google\n client_id: Duis ex qui veniam ut\n client_secret: elit in quis eiusmod dolore\n mapper_url: base64://bG9jYWwgc3ViamVjdCA9I...\n issuer_url: https://accounts.google.com\n auth_url: https://accounts.google.com/o/oauth2/v2/auth\n token_url: https://www.googleapis.com/oauth2/v4/token\n scope:\n - profile\n - profile\n - profile\n - profile\n tenant: consumers\n requested_claims: *a1\n - id: google\n provider: google\n client_id: culpa qui adipisicing\n client_secret: dolor proident eiusmod ut\n mapper_url: base64://bG9jYWwgc3ViamVjdCA9I...\n issuer_url: https://accounts.google.com\n auth_url: https://accounts.google.com/o/oauth2/v2/auth\n token_url: https://www.googleapis.com/oauth2/v4/token\n scope:\n - offline_access\n - offline_access\n - offline_access\n tenant: organizations\n requested_claims: *a2\n - id: google\n provider: google\n client_id: velit dolore non laborum magna\n client_secret: esse\n mapper_url: https://foo.bar.com/path/to/oidc.jsonnet\n issuer_url: https://accounts.google.com\n auth_url: https://accounts.google.com/o/oauth2/v2/auth\n token_url: https://www.googleapis.com/oauth2/v4/token\n scope:\n - offline_access\n - profile\n - profile\n - offline_access\n tenant: common\n requested_claims: *a1\n\n## Courier configuration ##\n#\n# The courier is responsible for sending and delivering messages over email, sms, and other means.\n#\ncourier:\n \n ## SMTP Configuration ##\n #\n # Configures outgoing emails using the SMTP protocol.\n #\n smtp:\n \n ## SMTP connection string ##\n #\n # This URI will be used to connect to the SMTP server. Use the query parameter to allow (`?skip_ssl_verify=true`) or disallow (`?skip_ssl_verify=false`) self-signed TLS certificates. Please keep in mind that any host other than localhost / 127.0.0.1 must use smtp over TLS (smtps) or the connection will not be possible.\n #\n # Examples:\n # - smtps://foo:bar@my-mailserver:1234/?skip_ssl_verify=false\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export COURIER_SMTP_CONNECTION_URI=<value>\n # - Windows Command Line (CMD):\n # > set COURIER_SMTP_CONNECTION_URI=<value>\n #\n connection_uri: smtps://foo:bar@my-mailserver:1234/?skip_ssl_verify=false\n\n ## SMTP Sender Address ##\n #\n # The recipient of an email will see this as the sender address.\n #\n # Default value: [email protected]\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export COURIER_SMTP_FROM_ADDRESS=<value>\n # - Windows Command Line (CMD):\n # > set COURIER_SMTP_FROM_ADDRESS=<value>\n #\n from_address: [email protected]\n\n ## Override message templates ##\n #\n # You can override certain or all message templates by pointing this key to the path where the templates are located.\n #\n # Examples:\n # - /conf/courier-templates\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export COURIER_TEMPLATE_OVERRIDE_PATH=<value>\n # - Windows Command Line (CMD):\n # > set COURIER_TEMPLATE_OVERRIDE_PATH=<value>\n #\n template_override_path: /conf/courier-templates\n\n## serve ##\n#\nserve:\n \n ## admin ##\n #\n admin:\n \n ## Admin Base URL ##\n #\n # The URL where the admin endpoint is exposed at.\n #\n # Examples:\n # - https://kratos.private-network:4434/\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SERVE_ADMIN_BASE_URL=<value>\n # - Windows Command Line (CMD):\n # > set SERVE_ADMIN_BASE_URL=<value>\n #\n base_url: https://kratos.private-network:4434/\n\n ## Admin Host ##\n #\n # The host (interface) kratos' admin endpoint listens on.\n #\n # Default value: 0.0.0.0\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SERVE_ADMIN_HOST=<value>\n # - Windows Command Line (CMD):\n # > set SERVE_ADMIN_HOST=<value>\n #\n host: dolor do qui\n\n ## Admin Port ##\n #\n # The port kratos' admin endpoint listens on.\n #\n # Default value: 4434\n #\n # Minimum value: 1\n #\n # Maximum value: 65535\n #\n # Examples:\n # - 4434\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SERVE_ADMIN_PORT=<value>\n # - Windows Command Line (CMD):\n # > set SERVE_ADMIN_PORT=<value>\n #\n port: 4434\n\n ## public ##\n #\n public:\n \n ## cors ##\n #\n # Configures Cross Origin Resource Sharing for public endpoints.\n #\n cors:\n \n ## enabled ##\n #\n # Sets whether CORS is enabled.\n #\n # Default value: false\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SERVE_PUBLIC_CORS_ENABLED=<value>\n # - Windows Command Line (CMD):\n # > set SERVE_PUBLIC_CORS_ENABLED=<value>\n #\n enabled: true\n\n ## allowed_origins ##\n #\n # A list of origins a cross-domain request can be executed from. If the special * value is present in the list, all origins will be allowed. An origin may contain a wildcard (*) to replace 0 or more characters (i.e.: http://*.domain.com). Only one wildcard can be used per origin.\n #\n # Default value: *\n #\n # Examples:\n # - - https://example.com\n # - https://*.example.com\n # - https://*.foo.example.com\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SERVE_PUBLIC_CORS_ALLOWED_ORIGINS=<value>\n # - Windows Command Line (CMD):\n # > set SERVE_PUBLIC_CORS_ALLOWED_ORIGINS=<value>\n #\n allowed_origins:\n - https://example.com\n - https://*.example.com\n - https://*.foo.example.com\n\n ## allowed_methods ##\n #\n # A list of HTTP methods the user agent is allowed to use with cross-domain requests.\n #\n # Default value: POST,GET,PUT,PATCH,DELETE\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SERVE_PUBLIC_CORS_ALLOWED_METHODS=<value>\n # - Windows Command Line (CMD):\n # > set SERVE_PUBLIC_CORS_ALLOWED_METHODS=<value>\n #\n allowed_methods:\n - OPTIONS\n - GET\n\n ## allowed_headers ##\n #\n # A list of non simple headers the client is allowed to use with cross-domain requests.\n #\n # Default value: Authorization,Content-Type,X-Session-Token\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SERVE_PUBLIC_CORS_ALLOWED_HEADERS=<value>\n # - Windows Command Line (CMD):\n # > set SERVE_PUBLIC_CORS_ALLOWED_HEADERS=<value>\n #\n allowed_headers:\n - Lorem sint cillum consequat occaecat\n - est ut Lorem non minim\n\n ## exposed_headers ##\n #\n # Sets which headers are safe to expose to the API of a CORS API specification.\n #\n # Default value: Content-Type\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SERVE_PUBLIC_CORS_EXPOSED_HEADERS=<value>\n # - Windows Command Line (CMD):\n # > set SERVE_PUBLIC_CORS_EXPOSED_HEADERS=<value>\n #\n exposed_headers:\n - labore fugiat\n - elit irure tempor nostrud mollit\n - sed Duis Lorem in\n - id sed dolore culpa\n\n ## allow_credentials ##\n #\n # Sets whether the request can include user credentials like cookies, HTTP authentication or client side SSL certificates.\n #\n # Default value: true\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SERVE_PUBLIC_CORS_ALLOW_CREDENTIALS=<value>\n # - Windows Command Line (CMD):\n # > set SERVE_PUBLIC_CORS_ALLOW_CREDENTIALS=<value>\n #\n allow_credentials: true\n\n ## options_passthrough ##\n #\n # TODO\n #\n # Default value: false\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SERVE_PUBLIC_CORS_OPTIONS_PASSTHROUGH=<value>\n # - Windows Command Line (CMD):\n # > set SERVE_PUBLIC_CORS_OPTIONS_PASSTHROUGH=<value>\n #\n options_passthrough: true\n\n ## max_age ##\n #\n # Sets how long (in seconds) the results of a preflight request can be cached. If set to 0, every request is preceded by a preflight request.\n #\n # Minimum value: 0\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SERVE_PUBLIC_CORS_MAX_AGE=<value>\n # - Windows Command Line (CMD):\n # > set SERVE_PUBLIC_CORS_MAX_AGE=<value>\n #\n max_age: 34415389\n\n ## debug ##\n #\n # Adds additional log output to debug server side CORS issues.\n #\n # Default value: false\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SERVE_PUBLIC_CORS_DEBUG=<value>\n # - Windows Command Line (CMD):\n # > set SERVE_PUBLIC_CORS_DEBUG=<value>\n #\n debug: false\n\n ## Public Base URL ##\n #\n # The URL where the public endpoint is exposed at.\n #\n # Examples:\n # - https://my-app.com/.ory/kratos/public\n # - /.ory/kratos/public/\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SERVE_PUBLIC_BASE_URL=<value>\n # - Windows Command Line (CMD):\n # > set SERVE_PUBLIC_BASE_URL=<value>\n #\n base_url: /.ory/kratos/public/\n\n ## Public Host ##\n #\n # The host (interface) kratos' public endpoint listens on.\n #\n # Default value: 0.0.0.0\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SERVE_PUBLIC_HOST=<value>\n # - Windows Command Line (CMD):\n # > set SERVE_PUBLIC_HOST=<value>\n #\n host: labore sit\n\n ## Public Port ##\n #\n # The port kratos' public endpoint listens on.\n #\n # Default value: 4433\n #\n # Minimum value: 1\n #\n # Maximum value: 65535\n #\n # Examples:\n # - 4433\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SERVE_PUBLIC_PORT=<value>\n # - Windows Command Line (CMD):\n # > set SERVE_PUBLIC_PORT=<value>\n #\n port: 4433\n\n## log ##\n#\nlog:\n \n ## level ##\n #\n # One of:\n # - trace\n # - debug\n # - info\n # - warning\n # - error\n # - fatal\n # - panic\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export LOG_LEVEL=<value>\n # - Windows Command Line (CMD):\n # > set LOG_LEVEL=<value>\n #\n level: fatal\n\n ## Leak Sensitive Log Values ##\n #\n # If set will leak sensitive values (e.g. emails) in the logs.\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export LOG_LEAK_SENSITIVE_VALUES=<value>\n # - Windows Command Line (CMD):\n # > set LOG_LEAK_SENSITIVE_VALUES=<value>\n #\n leak_sensitive_values: true\n\n ## format ##\n #\n # One of:\n # - json\n # - text\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export LOG_FORMAT=<value>\n # - Windows Command Line (CMD):\n # > set LOG_FORMAT=<value>\n #\n format: json\n\n## secrets ##\n#\nsecrets:\n \n ## Default Encryption Signing Secrets ##\n #\n # The first secret in the array is used for singing and encrypting things while all other keys are used to verify and decrypt older things that were signed with that old secret.\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SECRETS_DEFAULT=<value>\n # - Windows Command Line (CMD):\n # > set SECRETS_DEFAULT=<value>\n #\n default:\n - inutminim etesse elit officia deserunt\n - minim pariatur ipsum\n - id qui aliquip tempor\n - Ut elit reprehenderit\n - consectetur aliqua Duis anim\n\n ## Singing Keys for Cookies ##\n #\n # The first secret in the array is used for encrypting cookies while all other keys are used to decrypt older cookies that were signed with that old secret.\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SECRETS_COOKIE=<value>\n # - Windows Command Line (CMD):\n # > set SECRETS_COOKIE=<value>\n #\n cookie:\n - sit pariatur esse fugiat\n - dolorconsequat est\n\n## Hashing Algorithm Configuration ##\n#\nhashers:\n \n ## Configuration for the Argon2id hasher. ##\n #\n argon2:\n \n ## memory ##\n #\n # Minimum value: 16384\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export HASHERS_ARGON2_MEMORY=<value>\n # - Windows Command Line (CMD):\n # > set HASHERS_ARGON2_MEMORY=<value>\n #\n memory: 73237545\n\n ## iterations ##\n #\n # Minimum value: 1\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export HASHERS_ARGON2_ITERATIONS=<value>\n # - Windows Command Line (CMD):\n # > set HASHERS_ARGON2_ITERATIONS=<value>\n #\n iterations: 73298526\n\n ## parallelism ##\n #\n # Minimum value: 1\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export HASHERS_ARGON2_PARALLELISM=<value>\n # - Windows Command Line (CMD):\n # > set HASHERS_ARGON2_PARALLELISM=<value>\n #\n parallelism: 97880149\n\n ## salt_length ##\n #\n # Minimum value: 16\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export HASHERS_ARGON2_SALT_LENGTH=<value>\n # - Windows Command Line (CMD):\n # > set HASHERS_ARGON2_SALT_LENGTH=<value>\n #\n salt_length: 6140095\n\n ## key_length ##\n #\n # Minimum value: 16\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export HASHERS_ARGON2_KEY_LENGTH=<value>\n # - Windows Command Line (CMD):\n # > set HASHERS_ARGON2_KEY_LENGTH=<value>\n #\n key_length: 94445349\n\n## session ##\n#\nsession:\n \n ## Session Lifespan ##\n #\n # Defines how long a session is active. Once that lifespan has been reached, the user needs to sign in again.\n #\n # Default value: 24h\n #\n # Examples:\n # - 1h\n # - 1m\n # - 1s\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SESSION_LIFESPAN=<value>\n # - Windows Command Line (CMD):\n # > set SESSION_LIFESPAN=<value>\n #\n lifespan: 1h\n\n ## cookie ##\n #\n cookie:\n \n ## Session Cookie Domain ##\n #\n # Sets the session cookie domain. Useful when dealing with subdomains. Use with care!\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SESSION_COOKIE_DOMAIN=<value>\n # - Windows Command Line (CMD):\n # > set SESSION_COOKIE_DOMAIN=<value>\n #\n domain: laborum pariatur\n\n ## Make Session Cookie Persistent ##\n #\n # If set to true will persist the cookie in the end-user's browser using the `max-age` parameter which is set to the `session.lifespan` value. Persistent cookies are not deleted when the browser is closed (e.g. on reboot or alt+f4).\n #\n # Default value: true\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SESSION_COOKIE_PERSISTENT=<value>\n # - Windows Command Line (CMD):\n # > set SESSION_COOKIE_PERSISTENT=<value>\n #\n persistent: false\n\n ## Session Cookie Path ##\n #\n # Sets the session cookie path. Use with care!\n #\n # Default value: /\n #\n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SESSION_COOKIE_PATH=<value>\n # - Windows Command Line (CMD):\n # > set SESSION_COOKIE_PATH=<value>\n #\n path: proident ea in\n\n ## Cookie Same Site Configuration ##\n #\n # Default value: Lax\n #\n # One of:\n # - Strict\n # - Lax\n # - None\n # \n # Set this value using environment variables on\n # - Linux/macOS:\n # $ export SESSION_COOKIE_SAME_SITE=<value>\n # - Windows Command Line (CMD):\n # > set SESSION_COOKIE_SAME_SITE=<value>\n #\n same_site: Lax\n\n## The kratos version this config is written for. ##\n#\n# SemVer according to https://semver.org/ prefixed with `v` as in our releases.\n#\n# Examples:\n# - v0.5.0-alpha.1\n# \n# Set this value using environment variables on\n# - Linux/macOS:\n# $ export VERSION=<value>\n# - Windows Command Line (CMD):\n# > set VERSION=<value>\n#\nversion: v0.5.0-alpha.1\n\n")))}c.isMDXComponent=!0},422:function(n,e,t){"use strict";t.d(e,"a",(function(){return m})),t.d(e,"b",(function(){return d}));var o=t(0),a=t.n(o);function s(n,e,t){return e in n?Object.defineProperty(n,e,{value:t,enumerable:!0,configurable:!0,writable:!0}):n[e]=t,n}function i(n,e){var t=Object.keys(n);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(n);e&&(o=o.filter((function(e){return Object.getOwnPropertyDescriptor(n,e).enumerable}))),t.push.apply(t,o)}return t}function r(n){for(var e=1;e<arguments.length;e++){var t=null!=arguments[e]?arguments[e]:{};e%2?i(Object(t),!0).forEach((function(e){s(n,e,t[e])})):Object.getOwnPropertyDescriptors?Object.defineProperties(n,Object.getOwnPropertyDescriptors(t)):i(Object(t)).forEach((function(e){Object.defineProperty(n,e,Object.getOwnPropertyDescriptor(t,e))}))}return n}function l(n,e){if(null==n)return{};var t,o,a=function(n,e){if(null==n)return{};var t,o,a={},s=Object.keys(n);for(o=0;o<s.length;o++)t=s[o],e.indexOf(t)>=0||(a[t]=n[t]);return a}(n,e);if(Object.getOwnPropertySymbols){var s=Object.getOwnPropertySymbols(n);for(o=0;o<s.length;o++)t=s[o],e.indexOf(t)>=0||Object.prototype.propertyIsEnumerable.call(n,t)&&(a[t]=n[t])}return a}var u=a.a.createContext({}),c=function(n){var e=a.a.useContext(u),t=e;return n&&(t="function"==typeof n?n(e):r(r({},e),n)),t},m=function(n){var e=c(n.components);return a.a.createElement(u.Provider,{value:e},n.children)},S={inlineCode:"code",wrapper:function(n){var e=n.children;return a.a.createElement(a.a.Fragment,{},e)}},E=a.a.forwardRef((function(n,e){var t=n.components,o=n.mdxType,s=n.originalType,i=n.parentName,u=l(n,["components","mdxType","originalType","parentName"]),m=c(t),E=o,d=m["".concat(i,".").concat(E)]||m[E]||S[E]||s;return t?a.a.createElement(d,r(r({ref:e},u),{},{components:t})):a.a.createElement(d,r({ref:e},u))}));function d(n,e){var t=arguments,o=e&&e.mdxType;if("string"==typeof n||o){var s=t.length,i=new Array(s);i[0]=E;var r={};for(var l in e)hasOwnProperty.call(e,l)&&(r[l]=e[l]);r.originalType=n,r.mdxType="string"==typeof n?n:o,i[1]=r;for(var u=2;u<s;u++)i[u]=t[u];return a.a.createElement.apply(null,i)}return a.a.createElement.apply(null,t)}E.displayName="MDXCreateElement"}}]); |
||
validate.go | package jsonbody
import (
"encoding/json"
"errors"
"fmt"
"log"
"strings"
)
func | (schemaJSON string) (map[string]interface{}, error) {
if schemaJSON == "" {
return nil, nil
}
var schemaMap map[string]interface{}
err := json.Unmarshal([]byte(schemaJSON), &schemaMap)
if err != nil {
log.Printf("jsonbody: failed to decode schema: %v\n", err)
return nil, errors.New("jsonbody: failed to decode schema")
}
return schemaMap, nil
}
func validateReqBody(expected map[string]interface{}, actual map[string]interface{}) []string {
if expected == nil {
return []string{}
}
if actual == nil {
return []string{"expected a JSON body"}
}
return validateObject("", expected, actual)
}
func validateObject(key string, expected map[string]interface{}, actual map[string]interface{}) []string {
if len(expected) == 0 {
return []string{}
}
errs := make([]string, 0)
for expectedKey, expectedVal := range expected {
optional := strings.HasPrefix(expectedKey, "?")
expectedKey = strings.TrimPrefix(expectedKey, "?")
var newKey string
if key == "" {
newKey = expectedKey
} else {
newKey = key + "." + expectedKey
}
actualVal, ok := actual[expectedKey]
if !optional && !ok {
errs = append(errs, fmt.Sprintf("expected key '%v' missing", newKey))
} else if ok {
errs = append(errs, validateSingle(newKey, expectedVal, actualVal)...)
}
}
return errs
}
func validateSingle(key string, expected interface{}, actual interface{}) []string {
errs := make([]string, 0)
switch expected := expected.(type) {
case string:
if _, ok := actual.(string); !ok {
errs = append(errs, fmt.Sprintf("value for key '%v' expected to be of type string", key))
}
case bool:
if _, ok := actual.(bool); !ok {
errs = append(errs, fmt.Sprintf("value for key '%v' expected to be of type boolean", key))
}
case float64:
if _, ok := actual.(float64); !ok {
errs = append(errs, fmt.Sprintf("value for key '%v' expected to be of type number", key))
}
case []interface{}:
if actualArray, ok := actual.([]interface{}); !ok {
errs = append(errs, fmt.Sprintf("value for key '%v' expected to be of type array", key))
} else {
errs = append(errs, validateArray(key, expected, actualArray)...)
}
case map[string]interface{}:
if actualObj, ok := actual.(map[string]interface{}); !ok {
errs = append(errs, fmt.Sprintf("value for key '%v' expected to be of type object", key))
} else {
errs = append(errs, validateObject(key, expected, actualObj)...)
}
}
return errs
}
func validateArray(key string, expected []interface{}, actual []interface{}) []string {
if len(expected) == 0 {
return []string{}
}
errs := make([]string, 0)
for i, actualVal := range actual {
errs = append(errs, validateSingle(fmt.Sprintf("%v[%v]", key, i), expected[0], actualVal)...)
}
return errs
}
| parseSchema |
app.py | import boto3
import json
import logging
from crhelper import CfnResource
logger = logging.getLogger(__name__)
helper = CfnResource(
json_logging=False, log_level='DEBUG', boto_level='CRITICAL')
try:
sc = boto3.client("servicecatalog")
except Exception as e:
helper.init_failure(e)
def get_parameters(event):
aws_account_id = event['StackId'].split(':')[4]
name = event['ResourceProperties']['Name']
ssm_doc_name = event['ResourceProperties']['SsmDocName']
ssm_doc_version = event['ResourceProperties']['SsmDocVersion']
assume_role = event['ResourceProperties']['AssumeRole']
return aws_account_id, name, ssm_doc_name, ssm_doc_version, assume_role
def create_provider(aws_account_id, name, ssm_doc_name, ssm_doc_version, assume_role):
response = sc.create_service_action(
Name=name,
Description=name,
DefinitionType='SSM_AUTOMATION',
Definition= {
"Name": ssm_doc_name,
"Version": ssm_doc_version,
"AssumeRole": assume_role,
"Parameters": "[{\"Name\":\"InstanceId\",\"Type\":\"TARGET\"}]"
}
)
id = response['ServiceActionDetail']['ServiceActionSummary']['Id']
logger.info("created sc action " + id)
return id
@helper.create
def create(event, context):
logger.debug("Received event: " + json.dumps(event, sort_keys=False))
return create_provider(*get_parameters(event))
@helper.delete
def delete(event, context):
logger.debug("Received event: " + json.dumps(event, sort_keys=False))
id = event['PhysicalResourceId']
logger.info("deleting sc action " + id)
sc.delete_service_action(
Id=id
)
@helper.update
def update(event, context):
logger.debug("Received event: " + json.dumps(event, sort_keys=False))
new_properties = event['ResourceProperties']
old_properties = event['OldResourceProperties']
id = event['PhysicalResourceId']
if new_properties != old_properties:
response = sc.update_service_action(
Id=id,
Name=new_properties['Name'],
Description=new_properties['Name'],
Definition= {
"Name": new_properties['SsmDocName'],
"Version": new_properties['SsmDocVersion'],
"AssumeRole": new_properties['AssumeRole'],
"Parameters": "[{\"Name\":\"InstanceId\",\"Type\":\"TARGET\"}]"
}
)
id = response['ServiceActionDetail']['ServiceActionSummary']['Id']
logger.info("updated sc action = " + id)
return id
def lambda_handler(event, context): | helper(event, context) |
|
deepLearningTorch.py | """
Created: 16 August 2018
Last Updated: 16 August 2018
Dan Marley
[email protected]
Texas A&M University
-----
Class for performing deep learning in pytorch
Designed for running on desktop at TAMU
with specific set of software installed
--> not guaranteed to work in CMSSW environment!
Does not use ROOT directly.
Instead, this is setup to use flat ntuples
that are accessed via uproot.
> UPROOT: https://github.com/scikit-hep/uproot
> KERAS: https://keras.io/
> TENSORFLOW: https://www.tensorflow.org/
> PYTORCH: http://pytorch.org/
> LWTNN: https://github.com/lwtnn/lwtnn
"""
import json
import util
import datetime
import collections
from deepLearning import DeepLearning
import uproot
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as tf
from torch.autograd import Variable
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_curve
class LeopardNet(nn.Module):
"""Neural Network for Leopard in PyTorch
Adapted from (16 August 2018)
https://github.com/thongonary/surf18-tutorial/blob/master/tuto-8-torch.ipynb
"""
def | (self,layers):
super(LeopardNet,self).__init__()
self.dense = nn.ModuleList()
for l,layer in enumerate(layers):
self.dense.append( nn.Linear(layer['in'],layer['out']) )
def forward(self, x):
"""All the computation steps of the input are defined in this function"""
nlayers = len(self.dense)
for i,d in enumerate(self.dense):
x = d(x)
x = tf.relu(x) if i!=nlayers-1 else tf.sigmoid(x)
return x
class DeepLearningTorch(DeepLearning):
"""Deep Learning pytorch class"""
def __init__(self):
DeepLearning.__init__(self)
## PyTorch objects
self.loss_fn = None # pytorch loss function
self.torch_opt = None # pytorch optimizer
def initialize(self): #,config):
"""Initialize a few parameters after they've been set by user"""
DeepLearning.initialize(self)
return
## Specific functions to perform training/inference tasks
def build_model(self):
"""Construct the NN model -- only Keras support for now"""
self.msg_svc.INFO("DLPYTORCH : Build the neural network model")
## Declare the model
layers = []
layers.append( {'in':int(self.input_dim),'out':int(self.nNodes[0])} )
for i,n in enumerate(self.nNodes):
if i==len(self.nNodes)-1: continue
layers.append( {'in':int(n),'out':int(self.nNodes[i+1])} )
layers.append( {'in':int(self.nNodes[-1]),'out':self.output_dim} )
self.model = LeopardNet(layers)
self.model.cuda()
self.loss_fn = torch.nn.BCELoss()
self.torch_opt = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate) #1e-4)
return
def train_epoch(self,X,Y):
""""""
losses = []
for beg_i in range(0, len(X), self.batch_size):
x_batch = torch.from_numpy(X[beg_i:beg_i+self.batch_size,:])
y_batch = torch.from_numpy(Y[beg_i:beg_i+self.batch_size])
x_batch = Variable(x_batch).cuda()
y_batch = Variable(y_batch).float().unsqueeze_(-1).cuda() # modify dimensions (X,) -> (X,1)
self.torch_opt.zero_grad()
y_hat = self.model(x_batch) # forward
loss = self.loss_fn(y_hat, y_batch) # compute loss
loss.backward() # compute gradients
self.torch_opt.step() # update weights
losses.append(loss.data.cpu().numpy())
return losses
def train_model(self):
"""Setup for training the model using k-fold cross-validation"""
X = self.df[self.features].values
Y = self.df['target'].values
kfold = StratifiedKFold(n_splits=self.kfold_splits, shuffle=True, random_state=seed)
nsplits = kfold.get_n_splits(X,Y)
cvpredictions = [] # compare outputs from each cross-validation
self.msg_svc.INFO("DLPYTORCH : Fitting K-Fold cross validations")
for ind,(train,test) in enumerate(kfold.split(X,Y)):
self.msg_svc.INFO("DLPYTORCH : - Fitting K-Fold {0}".format(ind))
Y_train = Y[train]
Y_test = Y[test]
# -- store test/train data from each k-fold as histograms (to compare later)
h_tests = {}
h_trains = {}
for n,v in self.targets.iteritems():
h_tests[n] = ROOT.TH1D("test_"+n,"test_"+n,10,0,10)
h_trains[n] = ROOT.TH1D("train_"+n,"train_"+n,10,0,10)
# fill histogram for each target
for (n,v) in enumerate(self.targets.iteritems()):
[h_tests[n].Fill(i) for i in X[test][np.where(Y_test==v)]]
[h_trains[n].Fill(i) for i in X[train][np.where(Y_train==v)]]
## Fit the model to training data & save the history
self.model.train()
e_losses = []
for t in range(self.epochs):
e_losses += self.train_epoch(X[train],Y_train)
self.msg_svc.INFO("DLPYTORCH : Epoch {0} -- Loss {1}".format(t,e_losses[-1]))
self.histories.append(e_losses)
# evaluate the model
self.msg_svc.DEBUG("DLPYTORCH : Evaluate the model: ")
self.model.eval()
# Evaluate training sample
self.msg_svc.INFO("DLPYTORCH : Predictions from training sample")
train_predictions = self.predict(X[train])
self.train_predictions.append(train_predictions)
# Evaluate test sample
self.msg_svc.INFO("DLPYTORCH : Predictions from testing sample")
test_predictions = self.predict(X[test])
self.test_predictions.append(test_predictions)
# Make ROC curve from test sample
self.msg_svc.INFO("DLPYTORCH : Make ROC curves")
fpr,tpr,_ = roc_curve(Y[test], test_predictions)
self.fpr.append(fpr)
self.tpr.append(tpr)
# Plot the predictions to compare test/train
self.msg_svc.INFO("DLPYTORCH : Plot the train/test predictions")
self.plotter.prediction(h_trains,h_tests) # compare DNN prediction for different targets
self.msg_svc.INFO("DLPYTORCH : Finished K-Fold cross-validation: ")
self.accuracy = {'mean':np.mean(cvpredictions),'std':np.std(cvpredictions)}
self.msg_svc.INFO("DLPYTORCH : - Accuracy: {0:.2f}% (+/- {1:.2f}%)".format(np.mean(cvpredictions), np.std(cvpredictions)))
return
def predict(self,data=None):
"""Return the prediction from a test sample"""
self.msg_svc.DEBUG("DLPYTORCH : Get the DNN prediction")
if data is None:
self.msg_svc.ERROR("DLPYTORCH : predict() given NoneType data. Returning -999.")
return -999.
data = torch.from_numpy(data)
return self.model( Variable(data,volatile=True).cuda() )
def load_model(self,from_lwtnn=False):
"""Load existing model to make plots or predictions"""
output = self.output_dir+'/'+self.model_name
self.model.load_state_dict(torch.load(output))
self.model.eval()
return
def save_model(self,to_lwtnn=False):
"""Save the model for use later"""
output = self.output_dir+'/'+self.model_name
torch.save(self.model.state_dict(),output)
return
## THE END ##
| __init__ |
input.rs | pub mod input_event;
use winit::Window;
use vulkano::swapchain::Surface;
use std::sync::Arc;
use crate::settings::Settings;
use crate::input::input_event::InputEvent;
use std::cell::RefCell;
use std::rc::Rc;
use winit::EventsLoop;
use winit::Event;
use winit::WindowEvent;
use log::*;
/// Manages input. Fetches input events and manages window.
pub struct InputSystem {
events_loop: EventsLoop,
surface: Option<Arc<Surface<Window>>>,
}
impl InputSystem {
/// Creates new input system. At first surface is set to None because renderer is created after input system.
pub fn new() -> Self {
let events_loop = EventsLoop::new();
InputSystem {
events_loop: events_loop,
surface: None,
}
}
/// Returns a reference to the events loop.
pub fn events_loop(&self) -> &EventsLoop {
&self.events_loop
}
/// Sets the current surface.
pub fn set_surface(&mut self, surface: Arc<Surface<Window>>) {
self.surface = Some(surface);
}
/// Returns an Option with a reference to the application window.
pub fn window(&self) -> Option<&Window> {
self.surface.as_ref().map(|x| x.window())
}
/// Grabs cursor, preventing it from leaving the window.
pub fn grab_cursor(&self, value: bool) {
if let Some(window) = self.window() {
if let Err(err) = window.grab_cursor(value) {
error!("Error: {}", err);
}
}
}
/// Hides the cursor, making it invisible but still usable.
pub fn | (&mut self, value: bool) {
if let Some(window) = self.window() {
window.hide_cursor(value);
}
}
/// Loads pending events
pub fn fetch_pending_events(&mut self) -> Vec<Event> {
let mut events = Vec::new();
self.events_loop.poll_events(|input_event| {
events.push(input_event);
});
events
}
}
/// Converts winit events to InputEvents
pub fn convert_to_input_events(events: Vec<Event>) -> Vec<InputEvent> {
events.into_iter()
.filter_map(|event| input_event::to_input_event(event))
.collect()
}
| hide_cursor |
test_auth_emailactivation.py | from django.core.urlresolvers import resolve, reverse
from django.db import transaction
from django.test import TestCase
from django.test import Client
from django.utils import translation
from django.contrib.auth.models import User, Group
from django.contrib.auth import authenticate, login, logout
from rest_framework import status
from rest_framework.test import APIClient
from rest_framework.test import APITestCase
from rest_framework.authtoken.models import Token
from django_tenants.test.cases import TenantTestCase
from django_tenants.test.client import TenantClient
from smegurus import constants
TEST_USER_EMAIL = "[email protected]" |
class APIEmailActivationTestCase(APITestCase, TenantTestCase):
fixtures = []
def setup_tenant(self, tenant):
"""Public Schema"""
tenant.schema_name = 'test'
tenant.name = "Galactic Alliance of Humankind"
tenant.has_perks=True
tenant.has_mentors=True
tenant.how_discovered = "Command HQ"
tenant.how_many_served = 1
@classmethod
def setUpTestData(cls):
Group.objects.bulk_create([
Group(id=constants.ENTREPRENEUR_GROUP_ID, name="Entreprenuer",),
Group(id=constants.MENTOR_GROUP_ID, name="Mentor",),
Group(id=constants.ADVISOR_GROUP_ID, name="Advisor",),
Group(id=constants.ORGANIZATION_MANAGER_GROUP_ID, name="Org Manager",),
Group(id=constants.ORGANIZATION_ADMIN_GROUP_ID, name="Org Admin",),
Group(id=constants.CLIENT_MANAGER_GROUP_ID, name="Client Manager",),
Group(id=constants.SYSTEM_ADMIN_GROUP_ID, name="System Admin",),
])
user = User.objects.create_user( # Create our User.
email=TEST_USER_EMAIL,
username=TEST_USER_USERNAME,
password=TEST_USER_PASSWORD
)
user.is_active = True
user.save()
@transaction.atomic
def setUp(self):
translation.activate('en') # Set English
super(APIEmailActivationTestCase, self).setUp()
self.c = TenantClient(self.tenant)
@transaction.atomic
def tearDown(self):
users = User.objects.all()
for user in users.all():
user.delete()
# super(APIEmailActivationTestCase, self).tearDown()
@transaction.atomic
def test_api_send_activation(self):
url = reverse('api_emailactivation')
data = {
'email': TEST_USER_EMAIL,
}
response = self.c.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
from django.core import mail
# Test that one message has been sent.
self.assertEqual(len(mail.outbox), 1)
# Verify that the subject of the first message is correct.
self.assertEqual(mail.outbox[0].subject, 'Den Activation')
@transaction.atomic
def test_api_send_activation_with_no_email(self):
url = reverse('api_emailactivation')
data = {
'email': '[email protected]',
}
response = self.c.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) | TEST_USER_USERNAME = "ledo"
TEST_USER_PASSWORD = "GalacticAllianceOfHumankind" |
mod.rs | use nalgebra::{DMatrix, DVector};
use crate::algebra;
//-----------------------------------------------------------------------------
// TRAITS
/// Implemented by `Input -> Output` type pairs
///
/// Handles conversion to DMatrix type and stores information on associated output type.
/// Most methods of this library can currently work with the following `input -> ouput` pairs :
///
/// - `Vec<Vec<f64>> -> Vec<f64>` each inner vector is a multidimentional training sample
/// - `Vec<f64> -> f64` a single multidimensional sample
/// - `DMatrix<f64> -> DVector<f64>` using a [nalgebra](https://www.nalgebra.org/) matrix with one row per sample
/// | /// User-defined input type should implement this trait.
pub trait Input: Sized
{
/// type of the vectors storing training output data and given to methods
type InVector: Sized;
/// type of the vectors outputed when a method is called
type OutVector;
/// Converts an input matrix to a DMatrix.
fn to_dmatrix(m: &Self) -> DMatrix<f64>;
/// Optional: converts an owned input matrix to a DMatrix.
/// This function is used for to reduce copies when the input type is compatible with DMatrix.
fn into_dmatrix(m: Self) -> DMatrix<f64>
{
Self::to_dmatrix(&m)
}
/// Converts an input vector to a DVector.
fn to_dvector(v: &Self::InVector) -> DVector<f64>;
/// Optional: converts an owned input vector to a DVector.
/// This function is used for to reduce copies when the input type is compatible with DVector.
fn into_dvector(v: Self::InVector) -> DVector<f64>
{
Self::to_dvector(&v)
}
/// converts a DVector to an output vector.
fn from_dvector(v: &DVector<f64>) -> Self::OutVector;
}
//-----------------------------------------------------------------------------
// IMPLEMENTATIONS
/// direct implementation
impl Input for DMatrix<f64>
{
type InVector = DVector<f64>;
type OutVector = DVector<f64>;
/// converts an input matrix to a DMatrix
fn to_dmatrix(m: &Self) -> DMatrix<f64>
{
m.clone()
}
/// converts an input vector to a DVector
fn to_dvector(v: &Self::InVector) -> DVector<f64>
{
v.clone()
}
/// converts an input matrix to a DMatrix
fn into_dmatrix(m: Self) -> DMatrix<f64>
{
m
}
/// converts an input vector to a DVector
fn into_dvector(v: Self::InVector) -> DVector<f64>
{
v
}
/// converts a DVector to an output vector
fn from_dvector(v: &DVector<f64>) -> Self::OutVector
{
v.clone()
}
}
/// single row
impl Input for Vec<f64>
{
type InVector = f64;
type OutVector = f64;
/// converts an input matrix to a DMatrix
fn to_dmatrix(m: &Self) -> DMatrix<f64>
{
DMatrix::from_row_slice(1, m.len(), m)
}
/// converts an input vector to a DVector
fn to_dvector(v: &Self::InVector) -> DVector<f64>
{
DVector::from_element(1, *v)
}
/// converts a DVector to an output vector
fn from_dvector(v: &DVector<f64>) -> Self::OutVector
{
assert_eq!(v.nrows(), 1);
v[0]
}
}
/// multiple rows, base rust type
impl Input for Vec<Vec<f64>>
{
type InVector = Vec<f64>;
type OutVector = Vec<f64>;
/// converts an input matrix to a DMatrix
fn to_dmatrix(m: &Self) -> DMatrix<f64>
{
algebra::make_matrix_from_row_slices(m)
}
/// converts an input vector to a DVector
fn to_dvector(v: &Self::InVector) -> DVector<f64>
{
DVector::from_column_slice(v)
}
/// converts a DVector to an output vector
fn from_dvector(v: &DVector<f64>) -> Self::OutVector
{
v.iter().cloned().collect()
}
} | |
mod.rs | use crate::assets::{Assets, MeshAndAttributes};
use crate::shader::Shader;
use crate::shader::ShaderKind;
use crate::shader::ShaderKind::NonSkinnedNonTextured;
use crate::shader::ShaderSystem;
use crate::state_wrapper::State;
use blender_mesh::{BlenderMesh, CreateSingleIndexConfig, MaterialInput};
use js_sys::WebAssembly;
use nalgebra::{Isometry3, Vector3};
use std::cell::RefCell;
use std::rc::Rc;
use wasm_bindgen::JsCast;
use web_sys::WebGlRenderingContext as GL;
use web_sys::*;
mod armature_render;
mod mesh_render;
pub struct Renderer {
gl: Rc<WebGlRenderingContext>,
assets: Rc<RefCell<Assets>>,
shader_sys: Rc<ShaderSystem>,
}
#[derive(Debug, Eq, PartialEq, Hash, Ord, PartialOrd)]
pub enum VaoKey {
// TODO: Instead of String, an enum that's auto generated from mesh names in the Blender
// files via build.rs
MeshName(String),
}
pub enum RenderInstructions {
DrawElements { num_indices: i32 },
}
pub trait Renderable {
fn shader_kind(&self) -> ShaderKind;
fn vao_key(&self) -> VaoKey;
fn buffer_attributes(&self, gl: &WebGlRenderingContext, shader: &Shader);
fn set_uniforms(
&self,
gl: &WebGlRenderingContext,
shader: &Shader,
state: &State,
) -> RenderInstructions;
}
trait BlenderMeshRender {
fn render_non_skinned(&self, gl: &WebGlRenderingContext, shader_program: &Shader);
fn render_dual_quat_skinned(&self, gl: &WebGlRenderingContext, shader_program: &Shader);
}
struct Attrubute<T>(T);
struct Uniform<T>(T);
// TODO: These types can probably be automatically generated based on the shader
struct NonSkinnedMesh<'a> {
blender_mesh: &'a MeshAndAttributes,
name: String,
}
impl<'a> Renderable for NonSkinnedMesh<'a> {
fn shader_kind(&self) -> ShaderKind {
// if let Some(_) = self.armature_name {
// ShaderKind::DualQuatSkin
// } else {
// ShaderKind::NonSkinned
// }
if let Some(_) = self.blender_mesh.vertex_uvs {
ShaderKind::NonSkinnedWithTexture
} else {
ShaderKind::NonSkinnedNonTextured
}
}
fn vao_key(&self) -> VaoKey |
fn buffer_attributes(&self, gl: &WebGlRenderingContext, shader: &Shader) {
let pos_attrib =
gl.get_attrib_location(&shader.program.as_ref().unwrap(), "aVertexPosition");
let normal_attrib =
gl.get_attrib_location(&shader.program.as_ref().unwrap(), "aVertexNormal");
gl.enable_vertex_attrib_array(pos_attrib as u32);
gl.enable_vertex_attrib_array(normal_attrib as u32);
if let Some(ref uvs) = self.blender_mesh.vertex_uvs.as_ref() {
let uv_attrib =
gl.get_attrib_location(shader.program.as_ref().unwrap(), "aTextureCoord");
gl.enable_vertex_attrib_array(uv_attrib as u32);
GpuBufferer::buffer_f32_data(&gl, &uvs[..], uv_attrib as u32, 2);
}
let mesh = self.blender_mesh;
GpuBufferer::buffer_f32_data(&gl, &mesh.vertex_positions[..], pos_attrib as u32, 3);
GpuBufferer::buffer_f32_data(&gl, &mesh.vertex_normals[..], normal_attrib as u32, 3);
GpuBufferer::buffer_u16_indices(&gl, &mesh.vertex_position_indices[..]);
}
fn set_uniforms(
&self,
gl: &WebGlRenderingContext,
shader: &Shader,
state: &State,
) -> RenderInstructions {
// TODO: Cache uniform locations in the Shader.
let perspective_uni =
gl.get_uniform_location(shader.program.as_ref().unwrap(), "perspective");
let perspective = state.camera().projection();
gl.uniform_matrix4fv_with_f32_array(perspective_uni.as_ref(), false, &perspective);
let view = state.camera().view();
let view_uni = gl.get_uniform_location(shader.program.as_ref().unwrap(), "view");
let view_uni = view_uni.as_ref();
let pos = (0.0, 0.0, 0.0);
let model = Isometry3::new(Vector3::new(pos.0, pos.1, pos.2), nalgebra::zero());
let model = model.to_homogeneous();
let mut model_array = [0.; 16];
model_array.copy_from_slice(model.as_slice());
let model_uni = gl.get_uniform_location(shader.program.as_ref().unwrap(), "model");
let model_uni = model_uni.as_ref();
gl.uniform_matrix4fv_with_f32_array(model_uni, false, &mut model_array);
gl.uniform_matrix4fv_with_f32_array(view_uni, false, &view);
// FIXME: Add materials to both shaders .. not just non textured ..
if self.shader_kind() == NonSkinnedNonTextured {
let base_color_uni =
gl.get_uniform_location(shader.program.as_ref().unwrap(), "baseColor");
let base_color = match self.blender_mesh.materials().iter().next() {
Some((_, material)) => match material.base_color() {
MaterialInput::Uniform(color) => color,
_ => &[0.8, 0.552, 0.017],
},
None => &[0.8, 0.552, 0.017],
};
gl.uniform3fv_with_f32_array(base_color_uni.as_ref(), base_color);
}
let camera_pos_uni =
gl.get_uniform_location(shader.program.as_ref().unwrap(), "uCameraPos");
let camera_pos = state.camera().get_eye_pos();
gl.uniform3fv_with_f32_array(
camera_pos_uni.as_ref(),
&[camera_pos[0], camera_pos[1], camera_pos[2]],
);
let roughness_uni = gl.get_uniform_location(shader.program.as_ref().unwrap(), "roughness");
gl.uniform1f(roughness_uni.as_ref(), state.roughness());
let metallic_uni = gl.get_uniform_location(shader.program.as_ref().unwrap(), "metallic");
gl.uniform1f(metallic_uni.as_ref(), state.metallic());
let light_pos_uni = gl.get_uniform_location(shader.program.as_ref().unwrap(), "lightPos");
gl.uniform3fv_with_f32_array(light_pos_uni.as_ref(), &[1.1, 1.1, 1.1]);
let light_color_uni =
gl.get_uniform_location(shader.program.as_ref().unwrap(), "lightColor");
gl.uniform3fv_with_f32_array(light_color_uni.as_ref(), &[1.0, 1.0, 1.0]);
let num_indices = self.blender_mesh.vertex_position_indices.len() as i32;
RenderInstructions::DrawElements { num_indices }
}
}
impl Renderer {
pub fn new(
gl: Rc<WebGlRenderingContext>,
assets: Rc<RefCell<Assets>>,
shader_sys: Rc<ShaderSystem>,
) -> Renderer {
Renderer {
gl,
assets,
shader_sys,
}
}
pub fn render(&self, state: &State) {
let gl = &self.gl;
gl.clear(GL::COLOR_BUFFER_BIT | GL::DEPTH_BUFFER_BIT);
let mesh = self.assets.borrow().meshes();
let mesh = mesh.borrow();
let mesh = mesh.get(state.current_model.as_str());
if mesh.is_none() {
return;
}
let mesh = mesh.unwrap();
let renderable_mesh = NonSkinnedMesh {
blender_mesh: &mesh,
name: "Foo".to_string(),
};
self.shader_sys.use_program(&renderable_mesh.shader_kind());
let shader = self.shader_sys.get_shader(&renderable_mesh.shader_kind());
if shader.is_none() {
return;
}
let shader = shader.unwrap();
// if mesh.armature_name.is_some() {
// let armature = self.assets.borrow().armatures();
// let armature = armature.borrow();
// let armature = armature.get(mesh.armature_name.as_ref().unwrap());
//
// if armature.is_none() {
// return;
// }
//
// armature.unwrap().buffer_data(&self.gl, shader, &state);
// }
// TODO: Use VAOs and only buffer attributes once.
renderable_mesh.buffer_attributes(&self.gl, shader);
match renderable_mesh.set_uniforms(&self.gl, shader, state) {
RenderInstructions::DrawElements { num_indices } => {
gl.draw_elements_with_i32(GL::TRIANGLES, num_indices, GL::UNSIGNED_SHORT, 0);
}
}
}
}
pub struct GpuBufferer;
impl GpuBufferer {
pub fn buffer_f32_data(gl: &GL, data: &[f32], attrib: u32, size: i32) {
let memory_buffer = wasm_bindgen::memory()
.dyn_into::<WebAssembly::Memory>()
.unwrap()
.buffer();
let data_location = data.as_ptr() as u32 / 4;
let data_array = js_sys::Float32Array::new(&memory_buffer)
.subarray(data_location, data_location + data.len() as u32);
let buffer = gl.create_buffer().unwrap();
gl.bind_buffer(GL::ARRAY_BUFFER, Some(&buffer));
gl.buffer_data_with_array_buffer_view(GL::ARRAY_BUFFER, &data_array, GL::STATIC_DRAW);
gl.vertex_attrib_pointer_with_i32(attrib, size, GL::FLOAT, false, 0, 0);
}
pub fn buffer_u8_data(gl: &GL, data: &[u8], attrib: u32, size: i32) {
let memory_buffer = wasm_bindgen::memory()
.dyn_into::<WebAssembly::Memory>()
.unwrap()
.buffer();
let data_location = data.as_ptr() as u32;
let data_array = js_sys::Uint8Array::new(&memory_buffer)
.subarray(data_location, data_location + data.len() as u32);
let buffer = gl.create_buffer().unwrap();
gl.bind_buffer(GL::ARRAY_BUFFER, Some(&buffer));
gl.buffer_data_with_array_buffer_view(GL::ARRAY_BUFFER, &data_array, GL::STATIC_DRAW);
gl.vertex_attrib_pointer_with_i32(attrib, size, GL::UNSIGNED_BYTE, false, 0, 0);
}
pub fn buffer_u16_indices(gl: &GL, indices: &[u16]) {
let memory_buffer = wasm_bindgen::memory()
.dyn_into::<WebAssembly::Memory>()
.unwrap()
.buffer();
let indices_location = indices.as_ptr() as u32 / 2;
let indices_array = js_sys::Uint16Array::new(&memory_buffer)
.subarray(indices_location, indices_location + indices.len() as u32);
let index_buffer = gl.create_buffer().unwrap();
gl.bind_buffer(GL::ELEMENT_ARRAY_BUFFER, Some(&index_buffer));
gl.buffer_data_with_array_buffer_view(
GL::ELEMENT_ARRAY_BUFFER,
&indices_array,
GL::STATIC_DRAW,
);
}
}
#[cfg(test)]
mod tests {
#[test]
fn fo() {}
}
| {
VaoKey::MeshName(self.name.clone())
} |
user.js | import request from '@/utils/request'
export function login(data) {
return request({
url: '/login',
method: 'post',
data
})
}
export function getInfo() {
return request({
url: '/getInfo',
method: 'get',
})
}
export function logout() {
return request({ | }
export function uploadData(data) {
return request({
url: '/uploadStudent',
method: 'post',
data,
})
}
export function searchStudent(data) {
return request({
url: '/searchStudent',
method: 'post',
data
})
}
export function updateStudentData(data) {
return request({
url: '/updateStudentData',
method: 'post',
data
})
}
export function applyOut(data) {
return request({
url: '/applyOut',
method: 'post',
data
})
}
export function applyBack(data) {
return request({
url: '/applyBack',
method: 'post',
data
})
}
export function applyCode(number) {
return request({
url: `/applyCode?number=${number}`,
method: 'get',
})
}
export function applyList(passed) {
return request({
url: `/applyList?number=${passed}`,
method: 'get',
})
} | url: '/logout',
method: 'post'
}) |
app.component.ts | import { Component } from '@angular/core';
@Component({
selector: 'app-root',
templateUrl: './app.component.html',
styleUrls: ['./app.component.css'] | })
export class AppComponent {
title = 'UCDScheduleSaver';
fav = [];
} | |
json.go | package reportoutput
import (
"encoding/json"
"github.com/gildub/phronetic/pkg/io"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
func | (r ReportOutput) {
jsonReports, err := json.MarshalIndent(r, "", " ")
if err != nil {
panic(errors.Wrap(err, "unable to marshal reports"))
}
if err := io.WriteFile(jsonReports, jsonFileName); err != nil {
panic(errors.Wrapf(err, "unable to write to report file: %s", jsonFileName))
}
logrus.Infof("Report:Added: %s", jsonFileName)
}
| jsonOutput |
addon-service.ts | import memoizee from 'memoizee';
import { ValidationError } from 'joi';
import { getAddons, IAddonProviders } from '../addons';
import * as events from '../types/events';
import { addonSchema } from './addon-schema';
import NameExistsError from '../error/name-exists-error';
import { IEventStore } from '../types/stores/event-store';
import { IFeatureToggleStore } from '../types/stores/feature-toggle-store';
import { Logger } from '../logger';
import TagTypeService from './tag-type-service';
import { IAddon, IAddonDto, IAddonStore } from '../types/stores/addon-store';
import { IUnleashStores } from '../types/stores';
import { IUnleashConfig } from '../types/option';
import { IAddonDefinition } from '../types/model';
const SUPPORTED_EVENTS = Object.keys(events).map((k) => events[k]);
const ADDONS_CACHE_TIME = 60 * 1000; // 60s
const MASKED_VALUE = '*****';
interface ISensitiveParams {
[key: string]: string[];
}
export default class | {
eventStore: IEventStore;
addonStore: IAddonStore;
featureToggleStore: IFeatureToggleStore;
logger: Logger;
tagTypeService: TagTypeService;
addonProviders: IAddonProviders;
sensitiveParams: ISensitiveParams;
fetchAddonConfigs: (() => Promise<IAddon[]>) &
memoizee.Memoized<() => Promise<IAddon[]>>;
constructor(
{
addonStore,
eventStore,
featureToggleStore,
}: Pick<
IUnleashStores,
'addonStore' | 'eventStore' | 'featureToggleStore'
>,
{ getLogger, server }: Pick<IUnleashConfig, 'getLogger' | 'server'>,
tagTypeService: TagTypeService,
addons?: IAddonProviders,
) {
this.eventStore = eventStore;
this.addonStore = addonStore;
this.featureToggleStore = featureToggleStore;
this.logger = getLogger('services/addon-service.js');
this.tagTypeService = tagTypeService;
this.addonProviders =
addons ||
getAddons({
getLogger,
unleashUrl: server.unleashUrl,
});
this.sensitiveParams = this.loadSensitiveParams(this.addonProviders);
if (addonStore) {
this.registerEventHandler();
}
// Memoized private function
this.fetchAddonConfigs = memoizee(
async () => addonStore.getAll({ enabled: true }),
{
promise: true,
maxAge: ADDONS_CACHE_TIME,
},
);
}
loadSensitiveParams(addonProviders: IAddonProviders): ISensitiveParams {
const providerDefinitions = Object.values(addonProviders).map(
(p) => p.definition,
);
return providerDefinitions.reduce((obj, definition) => {
const sensitiveParams = definition.parameters
.filter((p) => p.sensitive)
.map((p) => p.name);
const o = { ...obj };
o[definition.name] = sensitiveParams;
return o;
}, {});
}
registerEventHandler(): void {
SUPPORTED_EVENTS.forEach((eventName) =>
this.eventStore.on(eventName, this.handleEvent(eventName)),
);
}
handleEvent(eventName: string): (IEvent) => void {
const { addonProviders } = this;
return (event) => {
this.fetchAddonConfigs().then((addonInstances) => {
addonInstances
.filter((addon) => addon.events.includes(eventName))
.filter((addon) => addonProviders[addon.provider])
.forEach((addon) =>
addonProviders[addon.provider].handleEvent(
event,
addon.parameters,
),
);
});
};
}
// Should be used by the controller.
async getAddons(): Promise<IAddon[]> {
const addonConfigs = await this.addonStore.getAll();
return addonConfigs.map((a) => this.filterSensitiveFields(a));
}
filterSensitiveFields(addonConfig: IAddon): IAddon {
const { sensitiveParams } = this;
const a = { ...addonConfig };
a.parameters = Object.keys(a.parameters).reduce((obj, paramKey) => {
const o = { ...obj };
if (sensitiveParams[a.provider].includes(paramKey)) {
o[paramKey] = MASKED_VALUE;
} else {
o[paramKey] = a.parameters[paramKey];
}
return o;
}, {});
return a;
}
async getAddon(id: number): Promise<IAddon> {
const addonConfig = await this.addonStore.get(id);
return this.filterSensitiveFields(addonConfig);
}
getProviderDefinitions(): IAddonDefinition[] {
const { addonProviders } = this;
return Object.values(addonProviders).map((p) => p.definition);
}
async addTagTypes(providerName: string): Promise<void> {
const provider = this.addonProviders[providerName];
if (provider) {
const tagTypes = provider.definition.tagTypes || [];
const createTags = tagTypes.map(async (tagType) => {
try {
await this.tagTypeService.validateUnique(tagType);
await this.tagTypeService.createTagType(
tagType,
providerName,
);
} catch (err) {
if (!(err instanceof NameExistsError)) {
this.logger.error(err);
}
}
});
await Promise.all(createTags);
}
return Promise.resolve();
}
async createAddon(data: IAddonDto, userName: string): Promise<IAddon> {
const addonConfig = await addonSchema.validateAsync(data);
await this.validateKnownProvider(addonConfig);
await this.validateRequiredParameters(addonConfig);
const createdAddon = await this.addonStore.insert(addonConfig);
await this.addTagTypes(createdAddon.provider);
this.logger.info(
`User ${userName} created addon ${addonConfig.provider}`,
);
await this.eventStore.store({
type: events.ADDON_CONFIG_CREATED,
createdBy: userName,
data: { provider: addonConfig.provider },
});
return createdAddon;
}
async updateAddon(
id: number,
data: IAddonDto,
userName: string,
): Promise<void> {
const addonConfig = await addonSchema.validateAsync(data);
await this.validateRequiredParameters(addonConfig);
if (this.sensitiveParams[addonConfig.provider].length > 0) {
const existingConfig = await this.addonStore.get(id);
addonConfig.parameters = Object.keys(addonConfig.parameters).reduce(
(params, key) => {
const o = { ...params };
if (addonConfig.parameters[key] === MASKED_VALUE) {
o[key] = existingConfig.parameters[key];
} else {
o[key] = addonConfig.parameters[key];
}
return o;
},
{},
);
}
await this.addonStore.update(id, addonConfig);
await this.eventStore.store({
type: events.ADDON_CONFIG_UPDATED,
createdBy: userName,
data: { id, provider: addonConfig.provider },
});
this.logger.info(`User ${userName} updated addon ${id}`);
}
async removeAddon(id: number, userName: string): Promise<void> {
await this.addonStore.delete(id);
await this.eventStore.store({
type: events.ADDON_CONFIG_DELETED,
createdBy: userName,
data: { id },
});
this.logger.info(`User ${userName} removed addon ${id}`);
}
async validateKnownProvider(config: Partial<IAddonDto>): Promise<boolean> {
const p = this.addonProviders[config.provider];
if (!p) {
throw new TypeError(`Unknown addon provider ${config.provider}`);
} else {
return true;
}
}
// eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types
async validateRequiredParameters({
provider,
parameters,
}): Promise<boolean> {
const providerDefinition = this.addonProviders[provider].definition;
const requiredParamsMissing = providerDefinition.parameters
.filter((p) => p.required)
.map((p) => p.name)
.filter(
(requiredParam) =>
!Object.keys(parameters).includes(requiredParam),
);
if (requiredParamsMissing.length > 0) {
throw new ValidationError(
`Missing required parameters: ${requiredParamsMissing.join(
',',
)} `,
'',
undefined,
);
}
return true;
}
}
| AddonService |
CreateServiceSpecificCredentialCommand.ts | import * as __aws_sdk_middleware_stack from "@aws-sdk/middleware-stack";
import * as __aws_sdk_types from "@aws-sdk/types";
import * as _stream from "stream";
import { CreateServiceSpecificCredential } from "../model/operations/CreateServiceSpecificCredential";
import { InputTypesUnion } from "../types/InputTypesUnion";
import { OutputTypesUnion } from "../types/OutputTypesUnion";
import { CreateServiceSpecificCredentialInput } from "../types/CreateServiceSpecificCredentialInput";
import { CreateServiceSpecificCredentialOutput } from "../types/CreateServiceSpecificCredentialOutput";
import { IAMResolvedConfiguration } from "../IAMConfiguration";
export * from "../types/CreateServiceSpecificCredentialInput";
export * from "../types/CreateServiceSpecificCredentialOutput";
export * from "../types/CreateServiceSpecificCredentialExceptionsUnion";
export class |
implements
__aws_sdk_types.Command<
InputTypesUnion,
CreateServiceSpecificCredentialInput,
OutputTypesUnion,
CreateServiceSpecificCredentialOutput,
IAMResolvedConfiguration,
_stream.Readable
> {
readonly model = CreateServiceSpecificCredential;
readonly middlewareStack = new __aws_sdk_middleware_stack.MiddlewareStack<
CreateServiceSpecificCredentialInput,
CreateServiceSpecificCredentialOutput,
_stream.Readable
>();
constructor(readonly input: CreateServiceSpecificCredentialInput) {}
resolveMiddleware(
clientStack: __aws_sdk_middleware_stack.MiddlewareStack<
InputTypesUnion,
OutputTypesUnion,
_stream.Readable
>,
configuration: IAMResolvedConfiguration
): __aws_sdk_types.Handler<
CreateServiceSpecificCredentialInput,
CreateServiceSpecificCredentialOutput
> {
const { handler } = configuration;
const stack = clientStack.concat(this.middlewareStack);
const handlerExecutionContext: __aws_sdk_types.HandlerExecutionContext = {
logger: {} as any,
model: this.model
};
return stack.resolve(
handler<
CreateServiceSpecificCredentialInput,
CreateServiceSpecificCredentialOutput
>(handlerExecutionContext),
handlerExecutionContext
);
}
}
| CreateServiceSpecificCredentialCommand |
main.go | // Copyright 2016 Nemanja Zbiljic
//
package main
import (
"encoding/json"
"errors"
"flag"
"fmt"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
)
const (
description = "CLI tool to search artifacts in maven central repository"
version = "0.2.0"
)
var (
fGroupId = flag.String("g", "", "specify groupId")
fArtifactId = flag.String("a", "", "specify artifactId")
fVersion = flag.String("v", "", "specify version")
fAllVersions = flag.Bool("A", false, "show all versions")
fMax = flag.Int("m", 20, "limit number of result")
)
var Usage = func() {
fmt.Fprintf(os.Stderr, "NAME:\n %s - %s\n", os.Args[0], description)
fmt.Fprint(os.Stderr, "\n")
fmt.Fprintf(os.Stderr, "USAGE:\n %s [options] [query]\n", os.Args[0])
fmt.Fprint(os.Stderr, "\n")
fmt.Fprintf(os.Stderr, "VERSION:\n %s\n", version)
fmt.Fprint(os.Stderr, "\n")
fmt.Fprintf(os.Stderr, "OPTIONS:\n")
flag.PrintDefaults()
fmt.Fprint(os.Stderr, "\n")
}
////////////////////////////////////////////////////////////////////////
// Helpers
////////////////////////////////////////////////////////////////////////
// JSONResponse solrsearch response.
type JSONResponse struct {
Response struct {
Docs []struct {
ID string
LatestVersion string
Timestamp int64
}
}
}
var maxVLen int
func collect(data JSONResponse) {
// calculate row size
for _, d := range data.Response.Docs {
vlen := 4 // + 4 spaces
vlen += len(d.ID)
vlen += len(d.LatestVersion)
if vlen > maxVLen {
maxVLen = vlen
}
}
// print results
for _, d := range data.Response.Docs {
var line string
if len(d.LatestVersion) == 0 {
line = fmt.Sprintf("%s", d.ID)
} else {
line = fmt.Sprintf("%s:%s", d.ID, d.LatestVersion)
}
fmt.Printf("compile '%s'", color(line))
fillLine(line)
fmt.Printf("%6s", msToTime(d.Timestamp).Format("2006-01-02"))
fmt.Println()
}
}
func color(s string) string {
id := strings.Split(s, ":")
return fmt.Sprintf("%s:%s:%s", colorGroupId(id[0]), colorArtifactId(id[1]), colorVersion(id[2]))
}
func colorGroupId(s string) string {
return fmt.Sprintf("%s%s%s", "\x1b[32m", s, "\x1b[0m")
}
func colorArtifactId(s string) string {
return fmt.Sprintf("%s%s%s", "\x1b[35m", s, "\x1b[0m")
}
func colorVersion(s string) string {
return fmt.Sprintf("%s%s%s%s", "\x1b[34m", "\x1b[1m", s, "\x1b[0m")
}
func msToTime(millis int64) time.Time {
return time.Unix(0, millis*int64(time.Millisecond))
}
func fillLine(line string) {
count := maxVLen - len(line)
for i := 0; i < count; i++ {
fmt.Print(" ")
}
}
func request(params url.Values) (JSONResponse, error) {
endpoint := "https://search.maven.org/solrsearch/select?" + params.Encode()
res := JSONResponse{}
resp, err := http.Get(endpoint)
if err != nil {
return res, err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return res, errors.New(strconv.Itoa(resp.StatusCode))
}
// parse JSON with anonymous struct.
d := json.NewDecoder(resp.Body)
if err := d.Decode(&res); err != nil {
return res, err
}
return res, nil
}
func | (q string) url.Values {
query := make([]string, 0)
if len(q) > 0 {
query = append(query, q)
}
groupId := *fGroupId
if len(groupId) > 0 {
query = append(query, appendQuery(query, "g", groupId))
}
artifactId := *fArtifactId
if len(artifactId) > 0 {
query = append(query, appendQuery(query, "a", artifactId))
}
version := *fVersion
if len(version) > 0 {
query = append(query, appendQuery(query, "v", version))
}
params := url.Values{
"wt": []string{"json"},
"rows": []string{strconv.Itoa(*fMax)},
"q": []string{strings.Join(query, "")},
}
if *fAllVersions {
params["core"] = []string{"gav"}
}
return params
}
func appendQuery(query []string, key, value string) string {
if len(query) == 0 {
return fmt.Sprintf("%s:\"%s\"", key, value)
}
return fmt.Sprintf(" AND %s:\"%s\"", key, value)
}
func containsNoQueryOptions() bool {
if len(*fGroupId) > 0 {
return true
}
if len(*fArtifactId) > 0 {
return true
}
return false
}
////////////////////////////////////////////////////////////////////////
// main logic
////////////////////////////////////////////////////////////////////////
func run(args []string) (err error) {
// Handles cases when too many arguments passed, and no arguments passed
// but also no (then) required options
if (len(args) > 2) || (len(args) < 1 && !containsNoQueryOptions()) {
err = fmt.Errorf("Usage: %s [options] [query]", os.Args[0])
return
}
var query string
if len(args) < 1 {
query = ""
} else {
query = args[0]
}
params := formatParams(query)
data, err1 := request(params)
if err1 != nil {
fmt.Printf("Returns status code %s. Please try it again.\n", err1)
}
collect(data)
fmt.Println()
return
}
func main() {
flag.Usage = Usage
flag.Parse()
err := run(flag.Args())
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
| formatParams |
signal.rs | // Portions of this file are Copyright 2014 The Rust Project Developers.
// See http://rust-lang.org/COPYRIGHT.
use libc;
use {Errno, Error, Result};
use std::mem;
use std::ptr;
// Currently there is only one definition of c_int in libc, as well as only one
// type for signal constants.
// We would prefer to use the libc::c_int alias in the repr attribute. Unfortunately
// this is not (yet) possible.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[repr(i32)]
pub enum Signal {
SIGHUP = libc::SIGHUP,
SIGINT = libc::SIGINT,
SIGQUIT = libc::SIGQUIT,
SIGILL = libc::SIGILL,
SIGTRAP = libc::SIGTRAP,
SIGABRT = libc::SIGABRT,
SIGBUS = libc::SIGBUS,
SIGFPE = libc::SIGFPE,
SIGKILL = libc::SIGKILL,
SIGUSR1 = libc::SIGUSR1,
SIGSEGV = libc::SIGSEGV,
SIGUSR2 = libc::SIGUSR2,
SIGPIPE = libc::SIGPIPE,
SIGALRM = libc::SIGALRM,
SIGTERM = libc::SIGTERM,
#[cfg(all(any(target_os = "linux", target_os = "android", target_os = "emscripten"), not(target_arch = "mips")))]
SIGSTKFLT = libc::SIGSTKFLT,
SIGCHLD = libc::SIGCHLD,
SIGCONT = libc::SIGCONT,
SIGSTOP = libc::SIGSTOP,
SIGTSTP = libc::SIGTSTP,
SIGTTIN = libc::SIGTTIN,
SIGTTOU = libc::SIGTTOU,
SIGURG = libc::SIGURG,
SIGXCPU = libc::SIGXCPU,
SIGXFSZ = libc::SIGXFSZ,
SIGVTALRM = libc::SIGVTALRM,
SIGPROF = libc::SIGPROF,
SIGWINCH = libc::SIGWINCH,
SIGIO = libc::SIGIO,
#[cfg(any(target_os = "linux", target_os = "android", target_os = "emscripten"))]
SIGPWR = libc::SIGPWR,
SIGSYS = libc::SIGSYS,
#[cfg(not(any(target_os = "linux", target_os = "android", target_os = "emscripten")))]
SIGEMT = libc::SIGEMT,
#[cfg(not(any(target_os = "linux", target_os = "android", target_os = "emscripten")))]
SIGINFO = libc::SIGINFO,
}
pub use self::Signal::*;
#[cfg(all(any(target_os = "linux", target_os = "android", target_os = "emscripten"), not(target_arch = "mips")))]
const SIGNALS: [Signal; 31] = [
SIGHUP,
SIGINT,
SIGQUIT,
SIGILL,
SIGTRAP,
SIGABRT,
SIGBUS,
SIGFPE,
SIGKILL,
SIGUSR1,
SIGSEGV,
SIGUSR2,
SIGPIPE,
SIGALRM,
SIGTERM,
SIGSTKFLT,
SIGCHLD,
SIGCONT,
SIGSTOP,
SIGTSTP,
SIGTTIN,
SIGTTOU,
SIGURG,
SIGXCPU,
SIGXFSZ,
SIGVTALRM,
SIGPROF,
SIGWINCH,
SIGIO,
SIGPWR,
SIGSYS];
#[cfg(all(any(target_os = "linux", target_os = "android", target_os = "emscripten"), target_arch = "mips"))]
const SIGNALS: [Signal; 30] = [
SIGHUP,
SIGINT,
SIGQUIT,
SIGILL,
SIGTRAP,
SIGABRT,
SIGBUS,
SIGFPE,
SIGKILL,
SIGUSR1,
SIGSEGV,
SIGUSR2,
SIGPIPE,
SIGALRM,
SIGTERM,
SIGCHLD,
SIGCONT,
SIGSTOP,
SIGTSTP,
SIGTTIN,
SIGTTOU,
SIGURG,
SIGXCPU,
SIGXFSZ,
SIGVTALRM,
SIGPROF,
SIGWINCH,
SIGIO,
SIGPWR,
SIGSYS];
#[cfg(not(any(target_os = "linux", target_os = "android", target_os = "emscripten")))]
const SIGNALS: [Signal; 31] = [
SIGHUP,
SIGINT,
SIGQUIT,
SIGILL,
SIGTRAP,
SIGABRT,
SIGBUS,
SIGFPE,
SIGKILL,
SIGUSR1,
SIGSEGV,
SIGUSR2,
SIGPIPE,
SIGALRM,
SIGTERM,
SIGCHLD,
SIGCONT,
SIGSTOP,
SIGTSTP,
SIGTTIN,
SIGTTOU,
SIGURG,
SIGXCPU,
SIGXFSZ,
SIGVTALRM,
SIGPROF,
SIGWINCH,
SIGIO,
SIGSYS,
SIGEMT,
SIGINFO];
pub const NSIG: libc::c_int = 32;
pub struct SignalIterator {
next: usize,
}
impl Iterator for SignalIterator {
type Item = Signal;
fn next(&mut self) -> Option<Signal> {
if self.next < SIGNALS.len() {
let next_signal = SIGNALS[self.next];
self.next += 1;
Some(next_signal)
} else {
None
}
}
}
impl Signal {
pub fn iterator() -> SignalIterator {
SignalIterator{next: 0}
}
// We do not implement the From trait, because it is supposed to be infallible.
// With Rust RFC 1542 comes the appropriate trait TryFrom. Once it is
// implemented, we'll replace this function.
#[inline]
pub fn from_c_int(signum: libc::c_int) -> Result<Signal> {
match 0 < signum && signum < NSIG {
true => Ok(unsafe { mem::transmute(signum) }),
false => Err(Error::invalid_argument()),
}
}
}
pub const SIGIOT : Signal = SIGABRT;
pub const SIGPOLL : Signal = SIGIO;
pub const SIGUNUSED : Signal = SIGSYS;
bitflags!{
flags SaFlags: libc::c_int {
const SA_NOCLDSTOP = libc::SA_NOCLDSTOP,
const SA_NOCLDWAIT = libc::SA_NOCLDWAIT,
const SA_NODEFER = libc::SA_NODEFER,
const SA_ONSTACK = libc::SA_ONSTACK,
const SA_RESETHAND = libc::SA_RESETHAND,
const SA_RESTART = libc::SA_RESTART,
const SA_SIGINFO = libc::SA_SIGINFO,
}
}
#[repr(i32)]
#[derive(Clone, Copy, PartialEq)]
pub enum SigmaskHow {
SIG_BLOCK = libc::SIG_BLOCK,
SIG_UNBLOCK = libc::SIG_UNBLOCK,
SIG_SETMASK = libc::SIG_SETMASK,
}
#[derive(Clone, Copy)]
pub struct SigSet {
sigset: libc::sigset_t
}
impl SigSet {
pub fn all() -> SigSet {
let mut sigset: libc::sigset_t = unsafe { mem::uninitialized() };
let _ = unsafe { libc::sigfillset(&mut sigset as *mut libc::sigset_t) };
SigSet { sigset: sigset }
}
pub fn empty() -> SigSet {
let mut sigset: libc::sigset_t = unsafe { mem::uninitialized() };
let _ = unsafe { libc::sigemptyset(&mut sigset as *mut libc::sigset_t) };
SigSet { sigset: sigset }
}
pub fn add(&mut self, signal: Signal) {
unsafe { libc::sigaddset(&mut self.sigset as *mut libc::sigset_t, signal as libc::c_int) };
}
pub fn clear(&mut self) {
unsafe { libc::sigemptyset(&mut self.sigset as *mut libc::sigset_t) };
}
pub fn remove(&mut self, signal: Signal) {
unsafe { libc::sigdelset(&mut self.sigset as *mut libc::sigset_t, signal as libc::c_int) };
}
pub fn contains(&self, signal: Signal) -> bool {
let res = unsafe { libc::sigismember(&self.sigset as *const libc::sigset_t, signal as libc::c_int) };
match res {
1 => true,
0 => false,
_ => unreachable!("unexpected value from sigismember"),
}
}
pub fn extend(&mut self, other: &SigSet) {
for signal in Signal::iterator() {
if other.contains(signal) {
self.add(signal);
}
}
}
/// Gets the currently blocked (masked) set of signals for the calling thread.
pub fn thread_get_mask() -> Result<SigSet> {
let mut oldmask: SigSet = unsafe { mem::uninitialized() };
try!(pthread_sigmask(SigmaskHow::SIG_SETMASK, None, Some(&mut oldmask)));
Ok(oldmask)
}
/// Sets the set of signals as the signal mask for the calling thread.
pub fn thread_set_mask(&self) -> Result<()> {
pthread_sigmask(SigmaskHow::SIG_SETMASK, Some(self), None)
}
/// Adds the set of signals to the signal mask for the calling thread.
pub fn thread_block(&self) -> Result<()> {
pthread_sigmask(SigmaskHow::SIG_BLOCK, Some(self), None)
}
/// Removes the set of signals from the signal mask for the calling thread.
pub fn thread_unblock(&self) -> Result<()> {
pthread_sigmask(SigmaskHow::SIG_UNBLOCK, Some(self), None)
}
/// Sets the set of signals as the signal mask, and returns the old mask.
pub fn thread_swap_mask(&self, how: SigmaskHow) -> Result<SigSet> {
let mut oldmask: SigSet = unsafe { mem::uninitialized() };
try!(pthread_sigmask(how, Some(self), Some(&mut oldmask)));
Ok(oldmask)
}
/// Suspends execution of the calling thread until one of the signals in the
/// signal mask becomes pending, and returns the accepted signal.
pub fn wait(&self) -> Result<Signal> {
let mut signum: libc::c_int = unsafe { mem::uninitialized() };
let res = unsafe { libc::sigwait(&self.sigset as *const libc::sigset_t, &mut signum) };
Errno::result(res).map(|_| Signal::from_c_int(signum).unwrap())
}
}
impl AsRef<libc::sigset_t> for SigSet {
fn as_ref(&self) -> &libc::sigset_t {
&self.sigset
}
}
#[allow(unknown_lints)]
#[derive(Clone, Copy, PartialEq)]
pub enum SigHandler {
SigDfl,
SigIgn,
Handler(extern fn(libc::c_int)),
SigAction(extern fn(libc::c_int, *mut libc::siginfo_t, *mut libc::c_void))
}
pub struct SigAction {
sigaction: libc::sigaction
}
impl SigAction {
/// This function will set or unset the flag `SA_SIGINFO` depending on the
/// type of the `handler` argument.
pub fn new(handler: SigHandler, flags: SaFlags, mask: SigSet) -> SigAction {
let mut s = unsafe { mem::uninitialized::<libc::sigaction>() };
s.sa_sigaction = match handler {
SigHandler::SigDfl => unsafe { mem::transmute(libc::SIG_DFL) },
SigHandler::SigIgn => unsafe { mem::transmute(libc::SIG_IGN) },
SigHandler::Handler(f) => unsafe { mem::transmute(f) },
SigHandler::SigAction(f) => unsafe { mem::transmute(f) },
};
s.sa_flags = match handler {
SigHandler::SigAction(_) => (flags | SA_SIGINFO).bits(),
_ => (flags - SA_SIGINFO).bits(),
};
s.sa_mask = mask.sigset;
SigAction { sigaction: s }
}
}
pub unsafe fn sigaction(signal: Signal, sigaction: &SigAction) -> Result<SigAction> {
let mut oldact = mem::uninitialized::<libc::sigaction>();
let res =
libc::sigaction(signal as libc::c_int, &sigaction.sigaction as *const libc::sigaction, &mut oldact as *mut libc::sigaction);
Errno::result(res).map(|_| SigAction { sigaction: oldact })
}
/// Manages the signal mask (set of blocked signals) for the calling thread.
///
/// If the `set` parameter is `Some(..)`, then the signal mask will be updated with the signal set.
/// The `how` flag decides the type of update. If `set` is `None`, `how` will be ignored,
/// and no modification will take place.
///
/// If the 'oldset' parameter is `Some(..)` then the current signal mask will be written into it.
///
/// If both `set` and `oldset` is `Some(..)`, the current signal mask will be written into oldset,
/// and then it will be updated with `set`.
///
/// If both `set` and `oldset` is None, this function is a no-op.
///
/// For more information, visit the [pthread_sigmask](http://man7.org/linux/man-pages/man3/pthread_sigmask.3.html),
/// or [sigprocmask](http://man7.org/linux/man-pages/man2/sigprocmask.2.html) man pages.
pub fn pthread_sigmask(how: SigmaskHow,
set: Option<&SigSet>,
oldset: Option<&mut SigSet>) -> Result<()> {
if set.is_none() && oldset.is_none() {
return Ok(())
}
let res = unsafe {
// if set or oldset is None, pass in null pointers instead
libc::pthread_sigmask(how as libc::c_int,
set.map_or_else(|| ptr::null::<libc::sigset_t>(),
|s| &s.sigset as *const libc::sigset_t),
oldset.map_or_else(|| ptr::null_mut::<libc::sigset_t>(),
|os| &mut os.sigset as *mut libc::sigset_t))
};
Errno::result(res).map(drop)
}
pub fn kill<T: Into<Option<Signal>>>(pid: libc::pid_t, signal: T) -> Result<()> {
let res = unsafe { libc::kill(pid,
match signal.into() {
Some(s) => s as libc::c_int,
None => 0,
}) };
Errno::result(res).map(drop)
}
pub fn raise(signal: Signal) -> Result<()> {
let res = unsafe { libc::raise(signal as libc::c_int) };
Errno::result(res).map(drop)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_contains() {
let mut mask = SigSet::empty();
mask.add(SIGUSR1);
assert!(mask.contains(SIGUSR1));
assert!(!mask.contains(SIGUSR2));
let all = SigSet::all();
assert!(all.contains(SIGUSR1));
assert!(all.contains(SIGUSR2));
}
#[test]
fn test_clear() {
let mut set = SigSet::all();
set.clear();
for signal in Signal::iterator() {
assert!(!set.contains(signal));
}
}
#[test]
fn test_extend() {
let mut one_signal = SigSet::empty();
one_signal.add(SIGUSR1);
let mut two_signals = SigSet::empty();
two_signals.add(SIGUSR2);
two_signals.extend(&one_signal);
assert!(two_signals.contains(SIGUSR1));
assert!(two_signals.contains(SIGUSR2));
}
// This test doesn't actually test get_mask functionality, see the set_mask test for that.
#[test]
fn test_thread_signal_get_mask() {
assert!(SigSet::thread_get_mask().is_ok());
}
#[test]
fn test_thread_signal_set_mask() {
let prev_mask = SigSet::thread_get_mask().expect("Failed to get existing signal mask!");
let mut test_mask = prev_mask;
test_mask.add(SIGUSR1);
assert!(test_mask.thread_set_mask().is_ok());
let new_mask = SigSet::thread_get_mask().expect("Failed to get new mask!");
assert!(new_mask.contains(SIGUSR1));
assert!(!new_mask.contains(SIGUSR2));
prev_mask.thread_set_mask().expect("Failed to revert signal mask!");
}
#[test]
fn test_thread_signal_block() {
let mut mask = SigSet::empty();
mask.add(SIGUSR1);
assert!(mask.thread_block().is_ok());
assert!(SigSet::thread_get_mask().unwrap().contains(SIGUSR1));
}
#[test]
fn test_thread_signal_unblock() {
let mut mask = SigSet::empty();
mask.add(SIGUSR1);
assert!(mask.thread_unblock().is_ok());
assert!(!SigSet::thread_get_mask().unwrap().contains(SIGUSR1));
}
#[test]
fn | () {
let mut mask = SigSet::empty();
mask.add(SIGUSR1);
mask.thread_block().unwrap();
assert!(SigSet::thread_get_mask().unwrap().contains(SIGUSR1));
let mut mask2 = SigSet::empty();
mask2.add(SIGUSR2);
let oldmask = mask2.thread_swap_mask(SigmaskHow::SIG_SETMASK).unwrap();
assert!(oldmask.contains(SIGUSR1));
assert!(!oldmask.contains(SIGUSR2));
assert!(SigSet::thread_get_mask().unwrap().contains(SIGUSR2));
}
// TODO(#251): Re-enable after figuring out flakiness.
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
#[test]
fn test_sigwait() {
let mut mask = SigSet::empty();
mask.add(SIGUSR1);
mask.add(SIGUSR2);
mask.thread_block().unwrap();
raise(SIGUSR1).unwrap();
assert_eq!(mask.wait().unwrap(), SIGUSR1);
}
}
| test_thread_signal_swap |
index.ts | import { AzureFunction, Context, HttpRequest } from '@azure/functions'
import { authenticatedAzureWrap } from '../src/azureWrap'
import displayMessage from '../src/endpoints/displayMessage'
const httpTrigger: AzureFunction = async function (context: Context, req: HttpRequest): Promise<void> { |
export default httpTrigger | await authenticatedAzureWrap(context, req, displayMessage)
} |
help.go | package cli
import (
"fmt"
"os"
"text/tabwriter"
"text/template"
)
// The text template for the Default help topic.
// cli.go uses text/template to render templates. You can
// render custom help text by setting this variable.
var AppHelpTemplate = `NAME:
{{.Name}} - {{.Usage}}
USAGE:
{{.Name}} {{if .Flags}}[global options] {{end}}command{{if .Flags}} [command options]{{end}} [arguments...]
VERSION:
{{.Version}}{{if or .Author .Email}}
AUTHOR:{{if .Author}}
{{.Author}}{{if .Email}} - <{{.Email}}>{{end}}{{else}}
{{.Email}}{{end}}{{end}}
COMMANDS:
{{range .Commands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
{{end}}{{if .Flags}}
GLOBAL OPTIONS:
{{range .Flags}}{{.}}
{{end}}{{end}}
`
// The text template for the command help topic.
// cli.go uses text/template to render templates. You can
// render custom help text by setting this variable.
var CommandHelpTemplate = `NAME:
{{.Name}} - {{.Usage}}
USAGE:
command {{.Name}}{{if .Flags}} [command options]{{end}} [arguments...]{{if .Description}}
DESCRIPTION:
{{.Description}}{{end}}{{if .Flags}}
OPTIONS:
{{range .Flags}}{{.}}
{{end}}{{ end }}
`
// The text template for the subcommand help topic.
// cli.go uses text/template to render templates. You can
// render custom help text by setting this variable.
var SubcommandHelpTemplate = `NAME:
{{.Name}} - {{.Usage}}
USAGE:
{{.Name}} command{{if .Flags}} [command options]{{end}} [arguments...]
COMMANDS:
{{range .Commands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
{{end}}{{if .Flags}}
OPTIONS:
{{range .Flags}}{{.}}
{{end}}{{end}}
`
var helpCommand = Command{
Name: "help",
ShortName: "h",
Usage: "Shows a list of commands or help for one command",
Action: func(c *Context) {
args := c.Args()
if args.Present() {
ShowCommandHelp(c, args.First())
} else {
ShowAppHelp(c)
}
},
}
var helpSubcommand = Command{
Name: "help",
ShortName: "h",
Usage: "Shows a list of commands or help for one command",
Action: func(c *Context) {
args := c.Args()
if args.Present() {
ShowCommandHelp(c, args.First())
} else {
ShowSubcommandHelp(c)
}
},
}
// Prints help for the App
var HelpPrinter = printHelp
| // Prints version for the App
var VersionPrinter = printVersion
func ShowAppHelp(c *Context) {
HelpPrinter(AppHelpTemplate, c.App)
}
// Prints the list of subcommands as the default app completion method
func DefaultAppComplete(c *Context) {
for _, command := range c.App.Commands {
fmt.Println(command.Name)
if command.ShortName != "" {
fmt.Println(command.ShortName)
}
}
}
// Prints help for the given command
func ShowCommandHelp(c *Context, command string) {
for _, c := range c.App.Commands {
if c.HasName(command) {
HelpPrinter(CommandHelpTemplate, c)
return
}
}
if c.App.CommandNotFound != nil {
c.App.CommandNotFound(c, command)
} else {
fmt.Printf("No help topic for '%v'\n", command)
}
}
// Prints help for the given subcommand
func ShowSubcommandHelp(c *Context) {
ShowCommandHelp(c, c.Command.Name)
}
// Prints the version number of the App
func ShowVersion(c *Context) {
VersionPrinter(c)
}
func printVersion(c *Context) {
fmt.Printf("%v version %v\n", c.App.Name, c.App.Version)
}
// Prints the lists of commands within a given context
func ShowCompletions(c *Context) {
a := c.App
if a != nil && a.BashComplete != nil {
a.BashComplete(c)
}
}
// Prints the custom completions for a given command
func ShowCommandCompletions(ctx *Context, command string) {
c := ctx.App.Command(command)
if c != nil && c.BashComplete != nil {
c.BashComplete(ctx)
}
}
func printHelp(templ string, data interface{}) {
w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
t := template.Must(template.New("help").Parse(templ))
err := t.Execute(w, data)
if err != nil {
panic(err)
}
w.Flush()
}
func checkVersion(c *Context) bool {
if c.GlobalBool("version") {
ShowVersion(c)
return true
}
return false
}
func checkHelp(c *Context) bool {
if c.GlobalBool("h") || c.GlobalBool("help") {
ShowAppHelp(c)
return true
}
return false
}
func checkCommandHelp(c *Context, name string) bool {
if c.Bool("h") || c.Bool("help") {
ShowCommandHelp(c, name)
return true
}
return false
}
func checkSubcommandHelp(c *Context) bool {
if c.GlobalBool("h") || c.GlobalBool("help") {
ShowSubcommandHelp(c)
return true
}
return false
}
func checkCompletions(c *Context) bool {
if (c.GlobalBool(BashCompletionFlag.Name) || c.Bool(BashCompletionFlag.Name)) && c.App.EnableBashCompletion {
ShowCompletions(c)
return true
}
return false
}
func checkCommandCompletions(c *Context, name string) bool {
if c.Bool(BashCompletionFlag.Name) && c.App.EnableBashCompletion {
ShowCommandCompletions(c, name)
return true
}
return false
} | |
main.rs | use clap;
//use slog::Drain;
use std::process;
mod build;
mod clean;
mod init;
mod update;
mod util;
fn run(cli: clap::ArgMatches) -> Result<(), String> {
// TODO do stuff
match cli.subcommand() {
Some(("update", sub_m)) => update::update(sub_m),
Some(("build", sub_m)) => build::build(sub_m),
Some(("init", sub_m)) => init::init(sub_m),
Some(("clean", sub_m)) => clean::clean(sub_m),
_ => panic!(
"I before E, except when your foreign neighbor Keith received eight counterfeit beige sleights from feisty caffeinated weightlifters. Weird."
),
}
// HACK add error handling
Ok(())
}
fn | () {
let matches = clap::Command::new("sitebuilder")
.version("0.1.0")
.author("Ágata Ordano")
.arg_required_else_help(true)
.about("Builds a reactive, interactive, full blown site from a static template")
// TODO Add option to skip cleanup on build
.subcommand(
clap::Command::new("build")
.about("Executes the build process.")
.after_help(
"This command will build the bundle, process it, and run it through Zola.",
)
.arg(
clap::Arg::new("development")
.short('D')
.long("development")
.takes_value(false)
.help("Performs a development build"),
)
.arg(
clap::Arg::new("output")
.short('o')
.long("output")
.takes_value(true)
.help("Set a custom output directory. Defaults to /dist.\nIf this flag contains no path, it will be looked for in the config file.\nIf the config file contains a key for custom output directory, this option will be ignored and the dir weill be always used."),
),
)
.subcommand(
clap::Command::new("init")
.about("Initializes a new sitebuilder project")
.after_help(
"This command will add a template to initialize a new project.",
)
)
.subcommand(
clap::Command::new("update")
.about("Pulls the data, source, content and assets from the remote repositories.")
.arg(clap::Arg::new("content")
.short('c')
.long("content")
.takes_value(false)
.help("Set this flag to only pull content."))
.arg(clap::Arg::new("data")
.short('d')
.long("data")
.takes_value(false)
.help("Set this flag to only pull data"))
.arg(clap::Arg::new("assets")
.short('a')
.long("assets")
.takes_value(false)
.help("Set this flag to only pull assets"))
.arg(clap::Arg::new("source")
.short('s')
.long("source")
.takes_value(false)
.help("Set this flag to only pull and build the source"))
.arg(clap::Arg::new("install")
.short('i')
.long("install")
.takes_value(false)
.help("Set this flag to only install dependencies"))
.after_help(
"By default, this command will pull all the data, content, source and assets from the remote repositories, and install the required dependencies.",
),
)
.subcommand(
clap::Command::new("clean")
.about("Cleans generated/pulled files")
.arg_required_else_help(true)
.subcommand(
clap::Command::new("pulled")
.about("Selectively cleans pulled files")
.arg_required_else_help(true)
.arg(clap::Arg::new("content")
.short('c')
.long("content")
.takes_value(false)
.help("Set this flag to only clean pulled content."))
.arg(clap::Arg::new("data")
.short('d')
.long("data")
.takes_value(false)
.help("Set this flag to only clean pulled data"))
.arg(clap::Arg::new("assets")
.short('a')
.long("assets")
.takes_value(false)
.help("Set this flag to only clean pulled assets"))
.arg(clap::Arg::new("source")
.short('s')
.long("source")
.takes_value(false)
.help("Set this flag to only clean pulled and build the source"))
.after_help(
"By default, this command will clean nothing, specify what to clear using the corresponding flags.",
),
).subcommand(
clap::Command::new("generated")
.about("Clean generated files")
.arg(clap::Arg::new("output")
.short('o')
.long("output")
.takes_value(true)
.help("Set this flag to specify output folder."))
.after_help("Select a custom output directory. Defaults to /dist.\nIf this flag contains no path, it will be looked for in the config file.\nIf the config file contains a key for custom output directory, this option will be ignored and the dir weill be always used.")
.after_help(
"By default, this command will clean using the output specified in the config file or default to dist, specify what to clear using the corresponding flags.",
),
)
.arg(clap::Arg::new("pulled_all")
.short('p')
.long("pulled")
.takes_value(false)
.help("Set this flag to clean all pulled files"))
.after_help(
"By default, this command will clean nothing, specify what to clear using the corresponding flags.",
),
)
.arg(
clap::Arg::new("verbose")
.short('v')
.multiple_occurrences(true)
.help("Sets the level of verbosity"),
)
.get_matches();
if let Err(error) = run(matches) {
println!("Application error: {}", error);
process::exit(1);
}
}
| main |
client.go | package client
import (
"bufio"
"bytes"
"net/http"
)
type Client struct {
Stream <-chan Event
Err error
closeChan chan struct{}
}
type Event struct {
Id string
Type string
Data []byte
}
func New(url string) (*Client, error) |
func (c *Client) Close() error {
c.closeChan <- struct{}{}
return nil
}
func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
if i := bytes.IndexAny(data, "\r\n"); i >= 0 {
if data[i] == '\r' {
if i == len(data)-1 {
if atEOF {
// final line
return len(data), data[:len(data)-1], nil
}
return 0, nil, nil // LF may follow, request more data
}
if data[i+1] == '\n' {
return i + 2, data[:i], nil
}
return i + 1, data[:i], nil
}
// data[i] == '\n'
return i + 1, data[:i], nil
}
if atEOF {
// final line
return len(data), data, nil
}
// request more data
return 0, nil, nil
}
| {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
req.Header.Set("Accept", "text/event-stream")
req.Close = true
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
stream := make(chan Event, 1024)
client := &Client{
Stream: stream,
}
go func() {
defer close(stream)
go func() {
<-client.closeChan
resp.Body.Close()
for _ = range client.closeChan {
}
}()
s := bufio.NewScanner(resp.Body)
s.Split(scanLines)
var event Event
for s.Scan() {
line := s.Bytes()
if len(line) == 0 {
stream <- event
event = Event{}
}
field := line
value := []byte{}
if colon := bytes.IndexByte(line, ':'); colon != -1 {
if colon == 0 {
continue // comment
}
field = line[:colon]
value = line[colon+1:]
if value[0] == ' ' {
value = value[1:]
}
}
switch string(field) {
case "event":
event.Type = string(value)
case "data":
event.Data = append(append(event.Data, value...), '\n')
case "id":
event.Id = string(value)
case "retry":
// TODO
default:
// ignored
}
}
client.Err = s.Err()
resp.Body.Close()
}()
return client, nil
} |
ToolStripItemAlignment.py | class ToolStripItemAlignment(Enum,IComparable,IFormattable,IConvertible):
"""
Determines the alignment of a System.Windows.Forms.ToolStripItem in a System.Windows.Forms.ToolStrip.
enum ToolStripItemAlignment,values: Left (0),Right (1)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
| def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Left=None
Right=None
value__=None | def __ge__(self,*args):
pass
|
jellyfish_merkle_test.rs | // Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0
use super::*;
use crate::test_helper::{
arb_existent_kvs_and_nonexistent_keys, arb_kv_pair_with_distinct_last_nibble,
arb_tree_with_index, gen_value, test_get_leaf_count, test_get_range_proof, test_get_with_proof,
test_get_with_proof_with_distinct_last_nibble, ValueBlob,
};
use aptos_crypto::HashValue;
use aptos_types::{nibble::Nibble, transaction::PRE_GENESIS_VERSION};
use mock_tree_store::MockTreeStore;
use proptest::{collection::hash_set, prelude::*};
use rand::{rngs::StdRng, Rng, SeedableRng};
use std::collections::HashMap;
fn update_nibble(original_key: &HashValue, n: usize, nibble: u8) -> HashValue {
assert!(nibble < 16);
let mut key = original_key.to_vec();
key[n / 2] = if n % 2 == 0 {
key[n / 2] & 0x0f | nibble << 4
} else {
key[n / 2] & 0xf0 | nibble
};
HashValue::from_slice(&key).unwrap()
}
fn gen_leaf(k: HashValue, v: &(HashValue, ValueBlob), version: Version) -> Node<ValueBlob> {
LeafNode::new(k, v.0, (v.1.clone(), version)).into()
}
#[test]
fn test_insert_to_empty_tree() {
let db = MockTreeStore::default();
let tree = JellyfishMerkleTree::new(&db);
// Tree is initially empty. Root is a null node. We'll insert a key-value pair which creates a
// leaf node.
let key = HashValue::random();
let state_key = ValueBlob::from(vec![1u8, 2u8, 3u8, 4u8]);
let value_hash = HashValue::random();
// batch version
let (_new_root_hash, batch) = tree
.batch_put_value_sets_test(
vec![vec![(key, &(value_hash, state_key))]],
None,
0, /* version */
)
.unwrap();
assert!(batch.stale_node_index_batch.is_empty());
db.write_tree_update_batch(batch).unwrap();
assert_eq!(tree.get(key, 0).unwrap().unwrap(), value_hash);
}
#[test]
fn test_insert_to_pre_genesis() {
// Set up DB with pre-genesis state (one single leaf node).
let db = MockTreeStore::default();
let key1 = HashValue::new([0x00u8; HashValue::LENGTH]);
let value1 = gen_value();
let pre_genesis_root_key = NodeKey::new_empty_path(PRE_GENESIS_VERSION);
db.put_node(
pre_genesis_root_key,
gen_leaf(key1, &value1, PRE_GENESIS_VERSION),
)
.unwrap();
// Genesis inserts one more leaf.
let tree = JellyfishMerkleTree::new(&db);
let key2 = update_nibble(&key1, 0, 15);
let value2 = gen_value();
// batch version
let (_root_hash, batch) = tree
.batch_put_value_sets(
vec![vec![(key2, &value2)]],
None,
Some(PRE_GENESIS_VERSION),
0, /* version */
)
.unwrap();
// Check pre-genesis node prunes okay.
assert_eq!(batch.stale_node_index_batch.len(), 1);
db.write_tree_update_batch(batch).unwrap();
assert_eq!(db.num_nodes(), 4);
db.purge_stale_nodes(0).unwrap();
assert_eq!(db.num_nodes(), 3);
// Check mixed state reads okay.
assert_eq!(tree.get(key1, 0).unwrap().unwrap(), value1.0);
assert_eq!(tree.get(key2, 0).unwrap().unwrap(), value2.0);
}
#[test]
fn test_insert_at_leaf_with_internal_created() {
let db = MockTreeStore::default();
let tree = JellyfishMerkleTree::new(&db);
let key1 = HashValue::new([0x00u8; HashValue::LENGTH]);
let value1 = gen_value();
let (_root0_hash, batch) = tree
.batch_put_value_sets_test(vec![vec![(key1, &value1)]], None, 0 /* version */)
.unwrap();
assert!(batch.stale_node_index_batch.is_empty());
db.write_tree_update_batch(batch).unwrap();
assert_eq!(tree.get(key1, 0).unwrap().unwrap(), value1.0);
// Insert at the previous leaf node. Should generate an internal node at the root.
// Change the 1st nibble to 15.
let key2 = update_nibble(&key1, 0, 15);
let value2 = gen_value();
let (_root1_hash, batch) = tree
.batch_put_value_sets_test(vec![vec![(key2, &value2)]], None, 1 /* version */)
.unwrap();
assert_eq!(batch.stale_node_index_batch.len(), 1);
db.write_tree_update_batch(batch).unwrap();
assert_eq!(tree.get(key1, 0).unwrap().unwrap(), value1.0);
assert!(tree.get(key2, 0).unwrap().is_none());
assert_eq!(tree.get(key2, 1).unwrap().unwrap(), value2.0);
// get # of nodes
assert_eq!(db.num_nodes(), 4 /* 1 + 3 */);
let internal_node_key = NodeKey::new_empty_path(1);
let leaf1 = gen_leaf(key1, &value1, 0);
let leaf2 = gen_leaf(key2, &value2, 1);
let mut children = HashMap::new();
children.insert(
Nibble::from(0),
Child::new(leaf1.hash(), 1 /* version */, NodeType::Leaf),
);
children.insert(
Nibble::from(15),
Child::new(leaf2.hash(), 1 /* version */, NodeType::Leaf),
);
let internal = Node::new_internal(children);
assert_eq!(db.get_node(&NodeKey::new_empty_path(0)).unwrap(), leaf1);
assert_eq!(
db.get_node(&internal_node_key.gen_child_node_key(1 /* version */, Nibble::from(0)))
.unwrap(),
leaf1
);
assert_eq!(
db.get_node(&internal_node_key.gen_child_node_key(1 /* version */, Nibble::from(15)))
.unwrap(),
leaf2
);
assert_eq!(db.get_node(&internal_node_key).unwrap(), internal);
}
#[test]
fn test_insert_at_leaf_with_multiple_internals_created() {
let db = MockTreeStore::default();
let tree = JellyfishMerkleTree::new(&db);
// 1. Insert the first leaf into empty tree
let key1 = HashValue::new([0x00u8; HashValue::LENGTH]);
let value1 = gen_value();
let (_root0_hash, batch) = tree
.batch_put_value_sets_test(vec![vec![(key1, &value1)]], None, 0 /* version */)
.unwrap();
db.write_tree_update_batch(batch).unwrap();
assert_eq!(tree.get(key1, 0).unwrap().unwrap(), value1.0);
// 2. Insert at the previous leaf node. Should generate a branch node at root.
// Change the 2nd nibble to 1.
let key2 = update_nibble(&key1, 1 /* nibble_index */, 1 /* nibble */);
let value2 = gen_value();
let (_root1_hash, batch) = tree
.batch_put_value_sets_test(vec![vec![(key2, &value2)]], None, 1 /* version */)
.unwrap();
db.write_tree_update_batch(batch).unwrap();
assert_eq!(tree.get(key1, 0).unwrap().unwrap(), value1.0);
assert!(tree.get(key2, 0).unwrap().is_none());
assert_eq!(tree.get(key2, 1).unwrap().unwrap(), value2.0);
assert_eq!(db.num_nodes(), 5);
let internal_node_key = NodeKey::new(1, NibblePath::new_odd(vec![0x00]));
let leaf1 = gen_leaf(key1, &value1, 0);
let leaf2 = gen_leaf(key2, &value2, 1);
let internal = {
let mut children = HashMap::new();
children.insert(
Nibble::from(0),
Child::new(leaf1.hash(), 1 /* version */, NodeType::Leaf),
);
children.insert(
Nibble::from(1),
Child::new(leaf2.hash(), 1 /* version */, NodeType::Leaf),
);
Node::new_internal(children)
};
let root_internal = {
let mut children = HashMap::new();
children.insert(
Nibble::from(0),
Child::new(
internal.hash(),
1, /* version */
NodeType::Internal { leaf_count: 2 },
),
);
Node::new_internal(children)
};
assert_eq!(db.get_node(&NodeKey::new_empty_path(0)).unwrap(), leaf1);
assert_eq!(
db.get_node(&internal_node_key.gen_child_node_key(1 /* version */, Nibble::from(0)))
.unwrap(),
leaf1,
);
assert_eq!(
db.get_node(&internal_node_key.gen_child_node_key(1 /* version */, Nibble::from(1)))
.unwrap(),
leaf2,
);
assert_eq!(db.get_node(&internal_node_key).unwrap(), internal);
assert_eq!(
db.get_node(&NodeKey::new_empty_path(1)).unwrap(),
root_internal,
);
// 3. Update leaf2 with new value
let value2_update = gen_value();
let (_root2_hash, batch) = tree
.batch_put_value_sets_test(
vec![vec![(key2, &value2_update)]],
None,
2, /* version */
)
.unwrap();
db.write_tree_update_batch(batch).unwrap();
assert!(tree.get(key2, 0).unwrap().is_none());
assert_eq!(tree.get(key2, 1).unwrap().unwrap(), value2.0);
assert_eq!(tree.get(key2, 2).unwrap().unwrap(), value2_update.0);
// Get # of nodes.
assert_eq!(db.num_nodes(), 8);
// Purge retired nodes.
db.purge_stale_nodes(1).unwrap();
assert_eq!(db.num_nodes(), 7);
db.purge_stale_nodes(2).unwrap();
assert_eq!(db.num_nodes(), 4);
assert_eq!(tree.get(key1, 2).unwrap().unwrap(), value1.0);
assert_eq!(tree.get(key2, 2).unwrap().unwrap(), value2_update.0);
}
#[test]
fn test_batch_insertion() {
// ```text
// internal(root)
// / \
// internal 2 <- nibble 0
// / | \
// internal 3 4 <- nibble 1
// |
// internal <- nibble 2
// / \
// internal 6 <- nibble 3
// |
// internal <- nibble 4
// / \
// 1 5 <- nibble 5
//
// Total: 12 nodes
// ```
let key1 = HashValue::new([0x00u8; HashValue::LENGTH]);
let value1 = gen_value();
let key2 = update_nibble(&key1, 0, 2);
let value2 = gen_value();
let value2_update = gen_value();
let key3 = update_nibble(&key1, 1, 3);
let value3 = gen_value();
let key4 = update_nibble(&key1, 1, 4);
let value4 = gen_value();
let key5 = update_nibble(&key1, 5, 5);
let value5 = gen_value();
let key6 = update_nibble(&key1, 3, 6);
let value6 = gen_value();
let batches = vec![
vec![(key1, &value1)],
vec![(key2, &value2)],
vec![(key3, &value3)],
vec![(key4, &value4)],
vec![(key5, &value5)],
vec![(key6, &value6)],
vec![(key2, &value2_update)],
];
let one_batch = batches.iter().flatten().cloned().collect::<Vec<_>>();
let mut to_verify = one_batch.clone();
// key2 was updated so we remove it.
to_verify.remove(1);
let verify_fn = |tree: &JellyfishMerkleTree<MockTreeStore<ValueBlob>, ValueBlob>,
version: Version| {
to_verify
.iter()
.for_each(|(k, v)| assert_eq!(tree.get(*k, version).unwrap().unwrap(), v.0))
};
// Insert as one batch and update one by one.
{
let db = MockTreeStore::default();
let tree = JellyfishMerkleTree::new(&db);
let (_root, batch) = tree.put_value_set_test(one_batch, 0 /* version */).unwrap();
db.write_tree_update_batch(batch).unwrap();
verify_fn(&tree, 0);
// get # of nodes
assert_eq!(db.num_nodes(), 12);
}
// Insert in multiple batches.
{
let db = MockTreeStore::default();
let tree = JellyfishMerkleTree::new(&db);
let (_roots, batch) = tree
.batch_put_value_sets_test(batches, None, 0 /* first_version */)
.unwrap();
db.write_tree_update_batch(batch).unwrap();
verify_fn(&tree, 6);
// get # of nodes
assert_eq!(db.num_nodes(), 26 /* 1 + 3 + 4 + 3 + 8 + 5 + 2 */);
// Purge retired nodes('p' means purged and 'a' means added).
// The initial state of the tree at version 0
// ```test
// 1(root)
// ```
db.purge_stale_nodes(1).unwrap();
// ```text
// 1 (p) internal(a)
// -> / \
// 1(a) 2(a)
// add 3, prune 1
// ```
assert_eq!(db.num_nodes(), 25);
db.purge_stale_nodes(2).unwrap();
// ```text
// internal(p) internal(a)
// / \ / \
// 1(p) 2 -> internal(a) 2
// / \
// 1(a) 3(a)
// add 4, prune 2
// ```
assert_eq!(db.num_nodes(), 23);
db.purge_stale_nodes(3).unwrap();
// ```text
// internal(p) internal(a)
// / \ / \
// internal(p) 2 -> internal(a) 2
// / \ / | \
// 1 3 1 3 4(a)
// add 3, prune 2
// ```
assert_eq!(db.num_nodes(), 21);
db.purge_stale_nodes(4).unwrap();
// ```text
// internal(p) internal(a)
// / \ / \
// internal(p) 2 internal(a) 2
// / | \ / | \
// 1(p) 3 4 -> internal(a) 3 4
// |
// internal(a)
// |
// internal(a)
// |
// internal(a)
// / \
// 1(a) 5(a)
// add 8, prune 3
// ```
assert_eq!(db.num_nodes(), 18);
db.purge_stale_nodes(5).unwrap();
// ```text
// internal(p) internal(a)
// / \ / \
// internal(p) 2 internal(a) 2
// / | \ / | \
// internal(p) 3 4 internal(a) 3 4
// | |
// internal(p) -> internal(a)
// | / \
// internal internal 6(a)
// | |
// internal internal
// / \ / \
// 1 5 1 5
// add 5, prune 4
// ```
assert_eq!(db.num_nodes(), 14);
db.purge_stale_nodes(6).unwrap();
// ```text
// internal(p) internal(a)
// / \ / \
// internal 2(p) internal 2(a)
// / | \ / | \
// internal 3 4 internal 3 4
// | |
// internal -> internal
// / \ / \
// internal 6 internal 6
// | |
// internal internal
// / \ / \
// 1 5 1 5
// add 2, prune 2
// ```
assert_eq!(db.num_nodes(), 12);
verify_fn(&tree, 6);
}
}
#[test]
fn test_non_existence() {
let db = MockTreeStore::default();
let tree = JellyfishMerkleTree::new(&db);
// ```text
// internal(root)
// / \
// internal 2
// |
// internal
// / \
// 1 3
// Total: 7 nodes
// ```
let key1 = HashValue::new([0x00u8; HashValue::LENGTH]);
let value1 = gen_value();
let key2 = update_nibble(&key1, 0, 15);
let value2 = gen_value();
let key3 = update_nibble(&key1, 2, 3);
let value3 = gen_value();
let (roots, batch) = tree
.batch_put_value_sets_test(
vec![vec![(key1, &value1), (key2, &value2), (key3, &value3)]],
None,
0, /* version */
)
.unwrap();
db.write_tree_update_batch(batch).unwrap();
assert_eq!(tree.get(key1, 0).unwrap().unwrap(), value1.0);
assert_eq!(tree.get(key2, 0).unwrap().unwrap(), value2.0);
assert_eq!(tree.get(key3, 0).unwrap().unwrap(), value3.0);
// get # of nodes
assert_eq!(db.num_nodes(), 6);
// test non-existing nodes.
// 1. Non-existing node at root node
{
let non_existing_key = update_nibble(&key1, 0, 1);
let (value, proof) = tree.get_with_proof(non_existing_key, 0).unwrap();
assert_eq!(value, None);
assert!(proof
.verify_by_hash(roots[0], non_existing_key, None)
.is_ok());
}
// 2. Non-existing node at non-root internal node
{
let non_existing_key = update_nibble(&key1, 1, 15);
let (value, proof) = tree.get_with_proof(non_existing_key, 0).unwrap();
assert_eq!(value, None);
assert!(proof
.verify_by_hash(roots[0], non_existing_key, None)
.is_ok());
}
// 3. Non-existing node at leaf node
{
let non_existing_key = update_nibble(&key1, 2, 4);
let (value, proof) = tree.get_with_proof(non_existing_key, 0).unwrap();
assert_eq!(value, None);
assert!(proof
.verify_by_hash(roots[0], non_existing_key, None)
.is_ok());
}
}
#[test]
fn test_missing_root() {
let db = MockTreeStore::<ValueBlob>::default();
let tree = JellyfishMerkleTree::new(&db);
let err = tree
.get_with_proof(HashValue::random(), 0)
.err()
.unwrap()
.downcast::<MissingRootError>()
.unwrap();
assert_eq!(err.version, 0);
}
#[test]
fn test_put_value_sets() {
let mut keys = vec![];
let total_updates = 20;
let values: Vec<_> = (0..total_updates).map(|_i| gen_value()).collect();
for _i in 0..total_updates {
keys.push(HashValue::random());
}
let mut root_hashes_one_by_one = vec![];
let mut batch_one_by_one = TreeUpdateBatch::default();
{
let db = MockTreeStore::default();
let tree = JellyfishMerkleTree::new(&db);
let mut index = 0;
for version in 0..10 {
let mut keyed_value_set = vec![];
for _ in 0..total_updates / 10 {
keyed_value_set.push((keys[index], &values[index]));
index += 1;
}
let (root, batch) = tree
.put_value_set_test(keyed_value_set, version as Version)
.unwrap();
db.write_tree_update_batch(batch.clone()).unwrap();
root_hashes_one_by_one.push(root);
batch_one_by_one.node_batch.extend(batch.node_batch);
batch_one_by_one
.stale_node_index_batch
.extend(batch.stale_node_index_batch);
batch_one_by_one.node_stats.extend(batch.node_stats);
}
}
{
let db = MockTreeStore::default();
let tree = JellyfishMerkleTree::new(&db);
let mut value_sets = vec![];
let mut index = 0;
for _ in 0..10 {
let mut keyed_value_set = vec![];
for _ in 0..total_updates / 10 {
keyed_value_set.push((keys[index], &values[index]));
index += 1;
}
value_sets.push(keyed_value_set);
}
let (root_hashes, batch) = tree
.batch_put_value_sets_test(value_sets, None, 0 /* version */)
.unwrap();
assert_eq!(root_hashes, root_hashes_one_by_one);
assert_eq!(batch, batch_one_by_one);
}
}
fn | (seed: &[u8], num_keys: usize) {
assert!(seed.len() < 32);
let mut actual_seed = [0u8; 32];
actual_seed[..seed.len()].copy_from_slice(seed);
let mut rng: StdRng = StdRng::from_seed(actual_seed);
let db = MockTreeStore::default();
let tree = JellyfishMerkleTree::new(&db);
let mut kvs = vec![];
let values: Vec<_> = (0..num_keys).map(|_i| gen_value()).collect();
for (index, _) in values.iter().enumerate() {
let key = HashValue::random_with_rng(&mut rng);
kvs.push((key, &values[index]));
}
let (roots, batch) = tree
.batch_put_value_sets_test(vec![kvs.clone()], None, 0 /* version */)
.unwrap();
db.write_tree_update_batch(batch).unwrap();
for (k, v) in &kvs {
let (value, proof) = tree.get_with_proof(*k, 0).unwrap();
assert_eq!(value.as_ref().unwrap().0, v.0);
assert_eq!(value.as_ref().unwrap().1 .0, v.1);
assert!(proof.verify_by_hash(roots[0], *k, Some(v.0)).is_ok());
}
}
#[test]
fn test_1000_keys() {
let seed: &[_] = &[1, 2, 3, 4];
many_keys_get_proof_and_verify_tree_root(seed, 1000);
}
fn many_versions_get_proof_and_verify_tree_root(seed: &[u8], num_versions: usize) {
assert!(seed.len() < 32);
let mut actual_seed = [0u8; 32];
actual_seed[..seed.len()].copy_from_slice(seed);
let mut rng: StdRng = StdRng::from_seed(actual_seed);
let db = MockTreeStore::default();
let tree = JellyfishMerkleTree::new(&db);
let mut kvs = vec![];
let mut roots = vec![];
let values: Vec<_> = (0..num_versions).map(|_i| gen_value()).collect();
let new_values: Vec<_> = (0..num_versions).map(|_i| gen_value()).collect();
for i in 0..num_versions {
let key = HashValue::random_with_rng(&mut rng);
kvs.push((key, &values[i], &new_values[i]));
}
for (idx, kvs) in kvs.iter().enumerate() {
let (root, batch) = tree
.batch_put_value_sets_test(vec![vec![(kvs.0, kvs.1)]], None, idx as Version)
.unwrap();
roots.push(root[0]);
db.write_tree_update_batch(batch).unwrap();
}
// Update value of all keys
for (idx, kvs) in kvs.iter().enumerate() {
let version = (num_versions + idx) as Version;
let (root, batch) = tree
.batch_put_value_sets_test(vec![vec![(kvs.0, kvs.2)]], None, version)
.unwrap();
roots.push(root[0]);
db.write_tree_update_batch(batch).unwrap();
}
for (i, (k, v, _)) in kvs.iter().enumerate() {
let random_version = rng.gen_range(i..i + num_versions);
let (value, proof) = tree.get_with_proof(*k, random_version as Version).unwrap();
assert_eq!(value.as_ref().unwrap().0, v.0);
assert_eq!(value.as_ref().unwrap().1 .0, v.1);
assert!(proof
.verify_by_hash(roots[random_version], *k, Some(v.0))
.is_ok());
}
for (i, (k, _, v)) in kvs.iter().enumerate() {
let random_version = rng.gen_range(i + num_versions..2 * num_versions);
let (value, proof) = tree.get_with_proof(*k, random_version as Version).unwrap();
assert_eq!(value.as_ref().unwrap().0, v.0);
assert_eq!(value.as_ref().unwrap().1 .0, v.1);
assert!(proof
.verify_by_hash(roots[random_version], *k, Some(v.0))
.is_ok());
}
}
#[test]
fn test_1000_versions() {
let seed: &[_] = &[1, 2, 3, 4];
many_versions_get_proof_and_verify_tree_root(seed, 1000);
}
proptest! {
#![proptest_config(ProptestConfig::with_cases(10))]
#[test]
fn proptest_get_with_proof((existent_kvs, nonexistent_keys) in arb_existent_kvs_and_nonexistent_keys::<ValueBlob>(1000, 100)) {
test_get_with_proof((existent_kvs, nonexistent_keys))
}
#[test]
fn proptest_get_with_proof_with_distinct_last_nibble((kv1, kv2) in arb_kv_pair_with_distinct_last_nibble::<ValueBlob>()) {
test_get_with_proof_with_distinct_last_nibble((kv1, kv2))
}
#[test]
fn proptest_get_range_proof((btree, n) in arb_tree_with_index::<ValueBlob>(1000)) {
test_get_range_proof((btree, n))
}
#[test]
fn proptest_get_leaf_count(keys in hash_set(any::<HashValue>(), 1..1000)) {
test_get_leaf_count(keys)
}
}
| many_keys_get_proof_and_verify_tree_root |
state_proof.go | // Copyright IBM Corp. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package main
import (
"fmt"
"os"
"github.com/pkg/errors"
"github.com/hyperledger-labs/orion-sdk-go/examples/util"
"github.com/hyperledger-labs/orion-sdk-go/pkg/bcdb"
"github.com/hyperledger-labs/orion-sdk-go/pkg/config"
"github.com/hyperledger-labs/orion-server/pkg/logger"
"github.com/hyperledger-labs/orion-server/pkg/types"
)
func main() {
if err := ExecuteStateTrieExample("../../util/config.yml"); err != nil {
os.Exit(1)
}
}
func ExecuteStateTrieExample(configLocation string) error {
session, err := OpenSession(configLocation)
if err != nil {
fmt.Printf("Database session creating failed, reason: %s\n", err.Error())
return err
}
_, receipt1, err := SyncDataUpdate(session, []string{"key1", "key2"}, []string{"val1", "val2"}, []string{})
if err != nil {
fmt.Printf("Syncronious update data failed, reason: %s\n", err.Error())
return err
}
_, receipt2, err := SyncDataUpdate(session, []string{"key1"}, []string{"val0"}, []string{"key2"})
if err != nil {
fmt.Printf("Syncronious update data failed, reason: %s\n", err.Error())
return err
}
ledger, err := session.Ledger()
if err != nil {
fmt.Printf("Can't get access to ledger, reason: %s\n", err.Error())
return err
}
// key1 has value val1 after the first transaction (receipt1), lets prove it
// Value to verify
dbName := "bdb"
key := "key1"
value := "val1"
deleted := false
// Lets query server for proof for value associated with key key1 in the data block pointed to by receipt1
fmt.Printf("Getting proof from server for db %s, key %s, value %s in block %d\n", dbName, key, value, receipt1.GetHeader().GetBaseHeader().GetNumber())
stateProof, err := ledger.GetDataProof(receipt1.GetHeader().GetBaseHeader().GetNumber(), dbName, key, deleted)
if err != nil {
fmt.Printf("Can't get state proof ledger, reason: %s\n", err.Error())
return err
}
fmt.Printf("Calculate value hash in Merkle-Patricia trie (stored as field in Branch or Value nodes) for db %s, key %s, value %s in state trie, by concatinating db, key and value hashes\n", dbName, key, value)
valueHash, err := bcdb.CalculateValueHash(dbName, key, []byte(value))
if err != nil {
fmt.Printf("Failed to calculate value hash in Merkle-Patricia trie , reason: %s\n", err.Error())
return err
}
fmt.Println("Verify proof using value hash and Merkle-Patricia trie root stored in block header")
isCorrect, err := stateProof.Verify(valueHash, receipt1.GetHeader().GetStateMerkelTreeRootHash(), deleted)
if err != nil {
fmt.Printf("Can't verify value in state trie, reason: %s\n", err.Error())
return err
}
if isCorrect {
fmt.Printf("Value [%s, %s, %s] is part of state trie at block %d\n", dbName, key, value, receipt1.GetHeader().GetBaseHeader().GetNumber())
} else {
errors.Errorf("Value [%s, %s, %s] is not part of state trie at block %d\n", dbName, key, value, receipt1.GetHeader().GetBaseHeader().GetNumber())
}
// key1 has value val0 after the second transaction (receipt2), lets prove it
// Value to verify
value = "val0"
// Lets query server for proof for value associated with key key1 in the data block pointed to by receipt2
fmt.Printf("Getting proof from server for db %s, key %s, value %s in block %d\n", dbName, key, value, receipt2.GetHeader().GetBaseHeader().GetNumber())
stateProof, err = ledger.GetDataProof(receipt2.GetHeader().GetBaseHeader().GetNumber(), dbName, key, deleted)
if err != nil {
fmt.Printf("Can't get state proof ledger, reason: %s\n", err.Error())
return err
}
fmt.Printf("Calculate value hash in Merkle-Patricia trie (stored as field in Branch or Value nodes) for db %s, key %s, value %s in state trie, by concatinating db, key and value hashes\n", dbName, key, value)
valueHash, err = bcdb.CalculateValueHash(dbName, key, []byte(value))
if err != nil {
fmt.Printf("Failed to calculate value hash in Merkle-Patricia trie , reason: %s\n", err.Error())
return err
}
fmt.Println("Verify proof using value hash and Merkle-Patricia trie root stored in block header")
isCorrect, err = stateProof.Verify(valueHash, receipt2.GetHeader().GetStateMerkelTreeRootHash(), deleted)
if err != nil {
fmt.Printf("Can't verify value in state trie, reason: %s\n", err.Error())
return err
}
if isCorrect {
fmt.Printf("Value [%s, %s, %s] is part of state trie at block %d\n", dbName, key, value, receipt2.GetHeader().GetBaseHeader().GetNumber())
} else {
return errors.Errorf("Value [%s, %s, %s] is not part of state trie at block %d\n", dbName, key, value, receipt2.GetHeader().GetBaseHeader().GetNumber())
}
// Old value of key1 is overwritten in second transaction
// Because proof query from server doesn't contains value as argument, we can use proof from above
value = "val1"
fmt.Printf("Calculate value hash in Merkle-Patricia trie (stored as field in Branch or Value nodes) for db %s, key %s, value %s in state trie, by concatinating db, key and value hashes\n", dbName, key, value)
valueHash, err = bcdb.CalculateValueHash(dbName, key, []byte(value))
if err != nil {
fmt.Printf("Failed to calculate value hash in Merkle-Patricia trie , reason: %s\n", err.Error())
return err
}
fmt.Println("Verify proof using value hash and Merkle-Patricia trie root stored in block header")
isCorrect, err = stateProof.Verify(valueHash, receipt2.GetHeader().GetStateMerkelTreeRootHash(), deleted)
if err != nil {
fmt.Printf("Can't verify value in state trie, reason: %s\n", err.Error())
return err
}
if isCorrect {
return errors.Errorf("Value [%s, %s, %s] is part of state trie at block %d\n", dbName, key, value, receipt2.Header.BaseHeader.Number)
} else {
fmt.Printf("Value [%s, %s, %s] is not part of state trie at block %d\n", dbName, key, value, receipt2.Header.BaseHeader.Number)
}
// Key key2 value val2 was deleted in the second transaction, lets prove it
// by querying server for proof with deleted flag set to true
key = "key2"
deleted = true
value = "val2"
fmt.Printf("Verify key is deleted: db %s, key %s, value %s in block %d\n", dbName, key, value, receipt2.GetHeader().GetBaseHeader().GetNumber())
stateProof, err = ledger.GetDataProof(receipt2.GetHeader().GetBaseHeader().GetNumber(), dbName, key, deleted)
if err != nil {
fmt.Printf("Can't get state proof ledger, reason: %s\n", err.Error())
return err
}
fmt.Printf("Calculate value hash in Merkle-Patricia trie (stored as field in Branch or Value nodes) for db %s, key %s, value %s in state trie, by concatinating db, key and value hashes\n", dbName, key, value)
valueHash, err = bcdb.CalculateValueHash(dbName, key, []byte(value))
if err != nil {
fmt.Printf("Failed to calculate value hash in Merkle-Patricia trie , reason: %s\n", err.Error())
return err
}
fmt.Println("Verify proof using value hash and Merkle-Patricia trie root stored in block header")
isCorrect, err = stateProof.Verify(valueHash, receipt2.GetHeader().GetStateMerkelTreeRootHash(), deleted)
if err != nil {
fmt.Printf("Can't verify value in state trie, reason: %s\n", err.Error())
return err
}
if isCorrect {
fmt.Printf("Value [%s, %s, %s] was deleted in state trie at block %d\n", dbName, key, value, receipt2.GetHeader().GetBaseHeader().GetNumber())
} else {
errors.Errorf("Value [%s, %s, %s] wasn't deleted in state trie at block %d\n", dbName, key, value, receipt2.GetHeader().GetBaseHeader().GetNumber())
}
return nil
}
func OpenSession(configLocation string) (bcdb.DBSession, error) {
c, err := util.ReadConfig(configLocation) | }
logger, err := logger.New(
&logger.Config{
Level: "debug",
OutputPath: []string{"stdout"},
ErrOutputPath: []string{"stderr"},
Encoding: "console",
Name: "bcdb-client",
},
)
if err != nil {
return nil, err
}
conConf := &config.ConnectionConfig{
ReplicaSet: c.ConnectionConfig.ReplicaSet,
RootCAs: c.ConnectionConfig.RootCAs,
Logger: logger,
}
fmt.Println("Opening connection to database, configuration: ", c.ConnectionConfig)
db, err := bcdb.Create(conConf)
if err != nil {
return nil, err
}
sessionConf := &config.SessionConfig{
UserConfig: c.SessionConfig.UserConfig,
TxTimeout: c.SessionConfig.TxTimeout,
QueryTimeout: c.SessionConfig.QueryTimeout}
fmt.Println("Opening session to database, configuration: ", c.SessionConfig)
session, err := db.Session(sessionConf)
if err != nil {
return nil, err
}
return session, nil
}
func SyncDataUpdate(session bcdb.DBSession, updateKeys, updateValues, deleteKeys []string) (string, *types.TxReceipt, error) {
fmt.Println("Opening data transaction")
tx, err := session.DataTx()
if err != nil {
fmt.Printf("Data transaction creating failed, reason: %s\n", err.Error())
return "", nil, err
}
for i := 0; i < len(updateKeys); i++ {
key := updateKeys[i]
value := updateValues[i]
fmt.Printf("Updating key value (%s, %s) in the database\n", key, value)
err = tx.Put("bdb", key, []byte(value), nil)
if err != nil {
fmt.Printf("Update key (%s) value in the database failed, reason: %s\n", key, err.Error())
return "", nil, err
}
}
for _, key := range deleteKeys {
fmt.Printf("Deleting key (%s) from the database\n", key)
err = tx.Delete("bdb", key)
if err != nil {
fmt.Printf("Deleting key (%s) from the database failed, reason: %s\n", key, err.Error())
return "", nil, err
}
}
fmt.Println("Committing transaction synchronously")
txID, receiptEnv, err := tx.Commit(true)
if err != nil {
fmt.Printf("Commit failed, reason: %s\n", err.Error())
return "", nil, err
}
receipt := receiptEnv.GetResponse().GetReceipt()
fmt.Printf("Transaction ID %s committed successfully in block %d\n", txID, receipt.GetHeader().GetBaseHeader().GetNumber())
return txID, receipt, nil
} | if err != nil {
return nil, err |
utils.rs | //! Defines macros to build Logics on top of theories.
#[macro_export]
macro_rules! define_sorts_for_logic {
($logic: ident, $($variant: ident -> $sort: ty),*) => {
#[derive(Clone, Debug)]
pub enum $logic {
$(
$variant($sort),
)*
}
$(
impl Into<$logic> for $sort {
fn into(self) -> $logic {
$logic::$variant(self)
}
}
)*
impl fmt::Display for $logic {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let s = match *self {
$(
$logic::$variant(ref op) => op.to_string(),
)*
};
write!(f, "{}", s)
}
} | }
}
#[macro_export]
macro_rules! define_fns_for_logic {
($logic: ident, map { $($variant: ident -> $sort: ty),* }, obool { $($ff: pat => $b: expr),* }) => {
#[derive(Clone, Debug)]
pub enum $logic {
$(
$variant($sort),
)*
}
$(
impl Into<$logic> for $sort {
fn into(self) -> $logic {
$logic::$variant(self)
}
}
)*
impl fmt::Display for $logic {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let s = match *self {
$(
$logic::$variant(ref op) => op.to_string(),
)*
};
write!(f, "{}", s)
}
}
impl SMTNode for $logic {
fn is_var(&self) -> bool {
match *self {
$(
$logic::$variant(ref inner) => inner.is_var(),
)*
}
}
fn is_const(&self) -> bool {
match *self {
$(
$logic::$variant(ref inner) => inner.is_const(),
)*
}
}
fn is_bool(&self) -> bool {
match *self {
$(
$ff => $b,
)*
_ => false,
}
}
}
}
}
#[macro_export]
macro_rules! define_logic {
($logic: ident, $op: ident, $sorts: ty, map { $($fv: pat => $rt: path),* }) => {
#[derive(Clone, Copy, Debug)]
pub struct $logic;
impl fmt::Display for $logic {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", stringify!($logic))
}
}
impl Logic for $logic {
type Fns = $op;
type Sorts = $sorts;
fn free_var<T: AsRef<str>>(name: T, ty: $sorts) -> Self::Fns {
match ty {
$(
$fv => $rt(name.as_ref().to_owned()).into(),
)*
_ => unreachable!(),
}
}
}
}
} | |
evaluate_simulation_coda_gan.py | import matplotlib.pyplot as plt
import argparse
import os
from collections import defaultdict
import habitat
import numpy as np
import quaternion
import torch
from evaluate_reality import load_model
from gym.spaces.dict_space import Dict as SpaceDict
from habitat.tasks.utils import cartesian_to_polar
from habitat.utils.geometry_utils import quaternion_rotate_vector
from habitat.utils.visualizations.utils import (images_to_video,
observations_to_image)
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.env_utils import construct_envs
from habitat_baselines.common.environments import get_env_class
from habitat_baselines.common.utils import batch_obs, generate_video
from habitat_baselines.config.default import get_config
from habitat_sim import geo
from habitat_sim.utils.common import quat_from_two_vectors, quat_rotate_vector
from PIL import Image
from predictor import Predictor
def quat_to_rad(rotation):
heading_vector = quaternion_rotate_vector(
rotation.inverse(), np.array([0, 0, -1])
)
phi = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1]
return phi
def create_state(position, rotation):
rotation_mp3d_habitat = quat_from_two_vectors(geo.GRAVITY, np.array([0, 0, -1]))
pt_mp3d = quat_rotate_vector(rotation_mp3d_habitat, position) # That point in the mp3d scene mesh coordinate frame.
state_xyt = [pt_mp3d[0], pt_mp3d[1]]
theta = quat_to_rad(rotation)
state_xyt.append(theta)
return state_xyt
def create_traj_labels(input_arr):
r, c = input_arr.shape
# labels: d_x, d_y, cos_d_t, sin_d_t
diff = np.diff(input_arr, axis=0)
labels_arr = np.zeros((r-1, 4))
labels_arr[:, :2] = diff[:, :2]
labels_arr[:, 2] = np.cos(diff[:, 2])
labels_arr[:, 3] = np.sin(diff[:, 2])
return labels_arr
def convert_embedding(input_arr_embed):
# SIMULATOR_REALITY_ACTIONS = {"stop": 0, "forward": 1 , "left": 2 , "right": 3}
ONE_HOT_ACTIONS = {"0": [0, 0, 0], "1": [0, 0, 1] , "2": [0, 1, 0] , "3": [1, 0, 0]}
r, c = input_arr_embed.shape
input_arr_oneHot = np.zeros((r, c+2))
input_arr_oneHot[:, :4] = input_arr_embed[:, :4]
for row in range(r):
input_arr_oneHot[row, 4:] = ONE_HOT_ACTIONS[str(int(input_arr_embed[row, 4]))]
## if logging collisions
# input_arr_oneHot[row, 4:7] = ONE_HOT_ACTIONS[str(int(input_arr_embed[row, 4]))]
# input_arr_embed[:, -1] = input_arr_embed[:, 5]
return input_arr_oneHot
def save_trajectory(data, datasplit, traj_dir, traj_ctr, datatype, embed_type=""):
pathend = datasplit + '_' + '%03d'%traj_ctr
if embed_type != "":
embed_type += "_"
filename = os.path.join(traj_dir, datatype + '_LRF_' + embed_type + pathend)
print('saving: ', filename)
np.save(filename, data[:, :])
np.savetxt(filename + '.csv', data[:, :], delimiter=",")
def create_labels_trajectory(labels_arr):
r, c = labels_arr.shape
# input embed: x, y, cost, sint, a
final_labels_arr = np.zeros((r, c+1))
## if logging collisions
# input_arr_embed = np.zeros((r, c+2))
final_labels_arr[:, :2] = labels_arr[:, :2]
final_labels_arr[:, 2] = np.cos(labels_arr[:, 2])
final_labels_arr[:, 3] = np.sin(labels_arr[:, 2])
return final_labels_arr
def create_input_trajectory(final_input_arr):
r, c = final_input_arr.shape
# input embed: x, y, cost, sint, a
input_arr_embed = np.zeros((r, c+1))
## if logging collisions
# input_arr_embed = np.zeros((r, c+2))
input_arr_embed[:, :2] = final_input_arr[:, :2]
input_arr_embed[:, 2] = np.cos(final_input_arr[:, 2])
input_arr_embed[:, 3] = np.sin(final_input_arr[:, 2])
input_arr_embed[:, 4] = final_input_arr[:, 3]
## if logging collisions
# input_arr_embed[:, 5] = final_input_arr[:, 4]
# input oneHot: x, y, cost, sint, a1, a2, a3
input_arr_oneHot = convert_embedding(input_arr_embed)
return input_arr_embed, input_arr_oneHot
def create_dir(dir_path):
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return
def get_last_idx(dir_path):
f = sorted(os.listdir(dir_path))
if not f:
ctr = 0
else:
ctr = int(f[-1].split('.')[0].split('_')[-1]) +1
return ctr
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model-path", type=str, required=True)
# parser.add_argument("--noisy", action="store_true")
parser.add_argument("--noise", type=str, required=True)
parser.add_argument("--save-imgs", action="store_true")
parser.add_argument("--save-traj", action="store_true")
parser.add_argument("--data-split", type=str, required=True)
parser.add_argument("--sensors", type=str, required=True)
parser.add_argument("--hidden-size", type=int, required=True)
parser.add_argument(
"--normalize-visual-inputs", type=int, required=True, choices=[0, 1]
)
parser.add_argument("--depth-only", action="store_true")
parser.add_argument("--use-gan", action="store_true")
parser.add_argument("--gan-weights", type=str, required=False)
parser.add_argument("--noise-type", type=str, required=True)
parser.add_argument(
"--backbone",
type=str,
required=True,
choices=["resnet50", "se_resneXt50"],
)
parser.add_argument("--num-recurrent-layers", type=int, required=True)
parser.add_argument(
"opts",
default=None,
nargs=argparse.REMAINDER,
help="Modify config options from command line",
)
args = parser.parse_args()
# Check torch version
# vtorch = "1.2.0"
#x assert torch.__version__ == vtorch, "Please use torch {}".format(vtorch)
if args.noise_type == 'poisson_ilqr':
if args.noise == 'all':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_poisson_ilqr.yaml"
elif args.noise == 'actuation':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_actuation_ilqr.yaml"
elif args.noise == 'sensors':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_sensors_poisson.yaml"
elif args.noise == 'no_noise':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_no_noise.yaml"
else:
print('no noise specified. using all noise')
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_poisson_ilqr.yaml"
elif args.noise_type == 'speckle_mb':
if args.noise == 'all':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_speckle_mb.yaml"
elif args.noise == 'actuation':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_actuation_mb.yaml"
elif args.noise == 'sensors':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_sensors_speckle.yaml"
elif args.noise == 'no_noise':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_no_noise.yaml"
else:
print('no noise specified. using all noise')
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_poisson_ilqr.yaml"
elif args.noise_type == 'gaussian_proportional':
if args.noise == 'all':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_gaussian_proportional.yaml"
elif args.noise == 'actuation':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_actuation_proportional.yaml"
elif args.noise == 'sensors':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_sensors_gaussian.yaml"
elif args.noise == 'no_noise':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_no_noise.yaml"
else:
print('no noise specified. using all noise')
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_gaussian_proportional.yaml"
config = get_config(
cfg_file, args.opts
)
if args.save_traj:
datasplit = args.data_split.split('_')[1]
split = 'train'
if datasplit == 'med':
split = 'test'
if args.save_imgs:
if args.noise!="no_noise":
depth_save_path = 'depth_' + config.TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.NOISE_MODEL + '_' + split
rgb_save_path = 'rgb_' + config.TASK_CONFIG.SIMULATOR.RGB_SENSOR.NOISE_MODEL + '_' + str(config.TASK_CONFIG.SIMULATOR.RGB_SENSOR.NOISE_MODEL_KWARGS.intensity_constant) + '_' + split
else:
depth_save_path = 'depth_no_noise_' + split
rgb_save_path = 'rgb_no_noise_' + split
if args.save_traj:
if args.noise!="no_noise":
traj_save_path = 'traj_' + config.TASK_CONFIG.SIMULATOR.NOISE_MODEL.CONTROLLER + '_' + str(config.TASK_CONFIG.SIMULATOR.NOISE_MODEL.NOISE_MULTIPLIER) + '_' + split
else:
traj_save_path = 'traj_no_noise_' + split
config.defrost()
config.TASK_CONFIG.TASK.BASE_STATE = habitat.Config()
config.TASK_CONFIG.TASK.BASE_STATE.TYPE = "BaseState"
# Add the measure to the list of measures in use
config.TASK_CONFIG.TASK.MEASUREMENTS.append("BASE_STATE")
if args.sensors == "":
config.SENSORS = []
else:
config.SENSORS = args.sensors.split(",")
config.TASK_CONFIG.TASK.MEASUREMENTS.append("COLLISIONS")
config.TASK_CONFIG.TASK.MEASUREMENTS.append("SOFT_SPL")
config.TASK_CONFIG.TASK.MEASUREMENTS.append("TOP_DOWN_MAP")
config.TASK_CONFIG.TASK.MEASUREMENTS.append("EPISODE_DISTANCE")
config.freeze()
envs = construct_envs(config, get_env_class(config.ENV_NAME))
sensors_obs = envs.observation_spaces[0]
if args.depth_only:
config.defrost()
config.SENSORS=["DEPTH_SENSOR"]
config.freeze()
envs2 = construct_envs(config, get_env_class(config.ENV_NAME))
sensors_obs = envs2.observation_spaces[0]
device = (
torch.device("cuda:{}".format(config.TORCH_GPU_ID))
if torch.cuda.is_available()
else torch.device("cpu")
)
model = load_model(
path=args.model_path,
observation_space=sensors_obs,
# observation_space=envs.observation_spaces[0],
action_space=envs.action_spaces[0],
hidden_size=args.hidden_size,
normalize_visual_inputs=bool(args.normalize_visual_inputs),
backbone=args.backbone,
num_recurrent_layers=args.num_recurrent_layers,
device=device,
)
model.eval()
if args.use_gan:
predictor = Predictor(args.gan_weights)
print('METRICS: ', config.TASK_CONFIG.TASK.MEASUREMENTS)
metric_name = "SPL"
metric_cfg = getattr(config.TASK_CONFIG.TASK, metric_name)
measure_type = baseline_registry.get_measure(metric_cfg.TYPE)
assert measure_type is not None, "invalid measurement type {}".format(
metric_cfg.TYPE
)
metric_uuid = measure_type(None, None)._get_uuid()
print('METRIC UUID: ', metric_uuid)
observations = envs.reset()
print('IMAGE TYPE: ' , observations[0]["rgb"].dtype, observations[0]["depth"].dtype)
# print(observations[0]["rgb"], observations[0]["depth"])
rgbd_img = np.dstack((observations[0]["rgb"], (observations[0]["depth"]*255).astype(np.uint8)))
gan_observations = predictor(rgbd_img)
observations[0]["depth"] = np.expand_dims((gan_observations[:,:,-1]/255).astype(np.float32), axis=2)
# print('IMAGE TYPE: ' , observations[0]["rgb"].dtype, observations[0]["depth"].dtype)
# print(observations[0]["rgb"], observations[0]["depth"])
#observations[0]["rgb"] = gan_observations[:,:,:3][...,::-1]
if args.depth_only:
del observations[0]["rgb"]
else:
# print('GAN TYPE: ', gan_observations[:,:,:3][...,::-1].dtype)
observations[0]["rgb"] = gan_observations[:,:,:3][...,::-1]
batch = batch_obs(observations, device)
current_episode_reward = torch.zeros(envs.num_envs, 1, device=device)
test_recurrent_hidden_states = torch.zeros(
model.net.num_recurrent_layers,
config.NUM_PROCESSES,
args.hidden_size,
device=device,
)
prev_actions = torch.zeros(
config.NUM_PROCESSES, 1, device=device, dtype=torch.long
)
not_done_masks = torch.zeros(config.NUM_PROCESSES, 1, device=device)
stats_episodes = dict() # dict of dicts that stores stats per episode
stats_actions = defaultdict(int)
rgb_frames = [
[] for _ in range(config.NUM_PROCESSES)
] # type: List[List[np.ndarray]]
if len(config.VIDEO_OPTION) > 0:
os.makedirs(config.VIDEO_DIR, exist_ok=True)
sensor_path = 'sim_sensor_imgs'
traj_path = 'sim_traj'
if args.save_imgs:
depth_dir = os.path.join(sensor_path, depth_save_path)
rgb_dir = os.path.join(sensor_path, rgb_save_path)
create_dir(depth_dir)
create_dir(rgb_dir)
img_ctr = get_last_idx(depth_dir)
if args.save_traj:
traj_dir = os.path.join(traj_path, traj_save_path)
create_dir(traj_dir)
traj_ctr = get_last_idx(traj_dir)
## not logging collisions
final_input_arr = np.array([0, 0, 0, 0])
## if logging collisions
# input_arr = np.array([0, 0, 0, 0, 0])
# final_input_arr = np.array([0, 0, 0, 0, 0])
tmp_labels_arr = np.array([0, 0, 0])
prev_base_state = [0, 0, 0]
num_actions = 0
# datasplit = args.data_split.split('_')[1]
print_once = True
called_stop = False
while (
len(stats_episodes) < config.TEST_EPISODE_COUNT and envs.num_envs > 0
):
current_episodes = envs.current_episodes()
if print_once:
print("Ep_id: ", current_episodes[0].episode_id, "Start_pos: ", current_episodes[0].start_position, current_episodes[0].start_rotation, "Goal_pos: ", current_episodes[0].goals[0].position)
print_once = False | with torch.no_grad():
_, actions, _, test_recurrent_hidden_states = model.act(
batch,
test_recurrent_hidden_states,
prev_actions,
not_done_masks,
deterministic=False,
)
prev_actions.copy_(actions)
outputs = envs.step([a[0].item() for a in actions])
num_actions +=1
for a in actions:
stats_actions[a[0].item()] += 1
observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
if args.save_imgs:
depth_obs = observations[0]["depth"]
depth_obs = np.squeeze(depth_obs)
depth_img = Image.fromarray((depth_obs * 255).astype(np.uint8), mode="L")
depth_img.save(os.path.join(depth_dir, "real_depth_" + "%05d"%img_ctr + ".jpg"), "JPEG")
rgb_obs = observations[0]["rgb"]
rgb_img = Image.fromarray(rgb_obs, mode="RGB")
rgb_img.save(os.path.join(rgb_dir, "real_rgb_" + "%05d"%img_ctr + ".jpg"), "JPEG")
rgbd_img = np.dstack((observations[0]["rgb"], (observations[0]["depth"]*255).astype(np.uint8)))
gan_observations = predictor(rgbd_img)
observations[0]["rgb"] = gan_observations[:,:,:3][...,::-1]
observations[0]["depth"] = np.expand_dims((gan_observations[:,:,-1]/255).astype(np.float32), axis=2)
if args.save_imgs:
depth_obs = observations[0]["depth"]
depth_obs = np.squeeze(depth_obs)
depth_img = Image.fromarray((depth_obs * 255).astype(np.uint8), mode="L")
depth_img.save(os.path.join(depth_dir, "sim_depth_" + "%05d"%img_ctr + ".jpg"), "JPEG")
rgb_obs = observations[0]["rgb"]
rgb_img = Image.fromarray(rgb_obs, mode="RGB")
rgb_img.save(os.path.join(rgb_dir, "sim_rgb_" + "%05d"%img_ctr + ".jpg"), "JPEG")
img_ctr +=1
if args.depth_only:
del observations[0]["rgb"]
batch = batch_obs(observations, device)
not_done_masks = torch.tensor(
[[0.0] if done else [1.0] for done in dones],
dtype=torch.float,
device=device,
)
rewards = torch.tensor(
rewards, dtype=torch.float, device=device
).unsqueeze(1)
current_episode_reward += rewards
next_episodes = envs.current_episodes()
envs_to_pause = []
n_envs = envs.num_envs
for i in range(n_envs):
if (
next_episodes[i].scene_id,
next_episodes[i].episode_id,
) in stats_episodes:
envs_to_pause.append(i)
# x, y, t, a
input_row = prev_base_state + [actions[i][0].cpu().detach().tolist()]
#input_row = prev_base_state + [actions[i][0].cpu().detach().tolist()] + [int(infos[i]["collisions"]["is_collision"])]
curr_state = create_state(infos[i]["base_state"]['position'], infos[i]["base_state"]['rotation'])
delta_row = np.subtract(curr_state, prev_base_state)
prev_base_state = curr_state
print(input_row + [int(infos[i]["collisions"]["is_collision"])])
if int(infos[i]["collisions"]["is_collision"]) == 0:
final_input_arr = np.vstack((final_input_arr, input_row))
tmp_labels_arr = np.vstack((tmp_labels_arr, delta_row))
# plt.ioff()
# _ = plt.hist(observations[i]["depth"].flatten(), bins='auto')
# plt.savefig('hist.jpg')
# TODO: save only good trajectories
# episode ended
if not_done_masks[i].item() == 0:
episode_stats = dict()
episode_stats[metric_uuid] = infos[i][metric_uuid]
episode_stats["success"] = int(infos[i][metric_uuid] > 0)
episode_stats["reward"] = current_episode_reward[i].item()
if actions[i][0].cpu().detach().tolist() == 0:
called_stop = True
# if infos[i]["collisions"] == 0:
# final_input_arr = np.vstack((final_input_arr, input_arr[2:-1, :]))
# final_labels_arr = np.vstack((final_labels_arr, labels_arr[2:-1,:]))
# final_input_arr = np.vstack((final_input_arr, input_arr[2:-1, :]))
# final_labels_arr = np.vstack((final_labels_arr, create_traj_labels(input_arr[2:, :])))
print(final_input_arr.ndim)
if final_input_arr.ndim > 1:
print("Final Shape: {}".format(final_input_arr[2:-1, :].shape))
input_arr_embed, input_arr_oneHot = create_input_trajectory(final_input_arr[2:-1, :])
final_labels_arr = create_labels_trajectory(tmp_labels_arr[2:-1, :])
if args.save_traj:
save_trajectory(input_arr_embed, datasplit, traj_dir, traj_ctr, 'input', embed_type="embed")
save_trajectory(input_arr_oneHot, datasplit, traj_dir, traj_ctr, 'input', embed_type="oneHot")
save_trajectory(final_labels_arr, datasplit, traj_dir, traj_ctr, 'labels', embed_type="")
traj_ctr +=1
print("# Actions: {}".format(num_actions))
print("# Collisions: {}".format(infos[i]["collisions"]["count"]))
print("Success: {}".format(episode_stats["success"]))
print("Agent Episode Distance: {}".format(infos[i]['episode_distance']['agent_episode_distance'])) #TODO
print("Final Distance to Goal: {}".format(infos[i]['episode_distance']['goal_distance'])) #TODO
print("SPL: {}".format(episode_stats[metric_uuid]))
print("Soft SPL: {}".format(infos[i]["softspl"]))
print("Called Stop: {}".format(called_stop))
current_episode_reward[i] = 0
## not logging collisions
final_input_arr = np.array([0, 0, 0, 0])
## if logging collisions
# input_arr = np.array([0, 0, 0, 0, 0])
# final_input_arr = np.array([0, 0, 0, 0, 0])
tmp_labels_arr = np.array([0, 0, 0])
prev_base_state = [0, 0, 0]
num_actions = 0
print_once = True
called_stop = False
# use scene_id + episode_id as unique id for storing stats
stats_episodes[
(
current_episodes[i].scene_id,
current_episodes[i].episode_id,
)
] = episode_stats
if len(config.VIDEO_OPTION) > 0:
metric_value = episode_stats[metric_uuid]
video_name = (
f"episode_{current_episodes[i].episode_id}"
f"_{metric_name}_{metric_value:.2f}"
)
images_to_video(
rgb_frames[i], config.VIDEO_DIR, video_name
)
rgb_frames[i] = []
print("Episodes finished: {}".format(len(stats_episodes)))
# episode continues
elif len(config.VIDEO_OPTION) > 0:
frame = observations_to_image(observations[i], infos[i])
rgb_frames[i].append(frame)
# pausing self.envs with no new episode
if len(envs_to_pause) > 0:
state_index = list(range(envs.num_envs))
for idx in reversed(envs_to_pause):
state_index.pop(idx)
envs.pause_at(idx)
# indexing along the batch dimensions
test_recurrent_hidden_states = test_recurrent_hidden_states[
:, state_index
]
not_done_masks = not_done_masks[state_index]
current_episode_reward = current_episode_reward[state_index]
prev_actions = prev_actions[state_index]
for k, v in batch.items():
batch[k] = v[state_index]
if len(config.VIDEO_OPTION) > 0:
rgb_frames = [rgb_frames[i] for i in state_index]
aggregated_stats = dict()
for stat_key in next(iter(stats_episodes.values())).keys():
aggregated_stats[stat_key] = sum(
[v[stat_key] for v in stats_episodes.values()]
)
num_episodes = len(stats_episodes)
episode_reward_mean = aggregated_stats["reward"] / num_episodes
episode_metric_mean = aggregated_stats[metric_uuid] / num_episodes
episode_success_mean = aggregated_stats["success"] / num_episodes
print(f"Number of episodes: {num_episodes}")
print(f"Average episode reward: {episode_reward_mean:.6f}")
print(f"Average episode success: {episode_success_mean:.6f}")
print(f"Average episode {metric_uuid}: {episode_metric_mean:.6f}")
print("Stats actions:", stats_actions)
envs.close()
if __name__ == "__main__":
main() | |
test_example.py | import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
@pytest.fixture(scope="session")
def driver(request):
wd=webdriver.Chrome()
request.addfinalizer(wd.quit)
return wd
| driver.find_element_by_name("login").click()
WebDriverWait(driver,10).until(EC.title_is("My Store"))
print('Поставим sleep перед закрытием браузера на 5 секунд для отладки')
time.sleep(5) | def test_admin_login(driver):
driver.get("http://localhost/litecart/admin")
driver.find_element_by_name("username").send_keys("admin")
driver.find_element_by_name("password").send_keys("123456") |
main.go | //go:build scripts
package main
import "github.com/mwieser/go-micro/scripts/cmd"
func main() | {
cmd.Execute()
} |
|
type_test.go | // Copyright 2019-present Facebook Inc. All rights reserved.
// This source code is licensed under the Apache 2.0 license found
// in the LICENSE file in the root directory of this source tree.
package integration
import (
"context"
"math"
"net"
"net/http"
"net/url"
"testing"
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/entc/integration/ent"
"entgo.io/ent/entc/integration/ent/fieldtype"
"entgo.io/ent/entc/integration/ent/role"
"entgo.io/ent/entc/integration/ent/schema"
"entgo.io/ent/entc/integration/ent/task"
"github.com/stretchr/testify/require"
)
func | (t *testing.T, client *ent.Client) {
ctx := context.Background()
require := require.New(t)
link, err := url.Parse("localhost")
require.NoError(err)
ft := client.FieldType.Create().
SetInt(1).
SetInt8(8).
SetInt16(16).
SetInt32(32).
SetInt64(64).
SaveX(ctx)
require.NotEmpty(t, ft.ID)
require.Equal(1, ft.Int)
require.Equal(int8(8), ft.Int8)
require.Equal(int16(16), ft.Int16)
require.Equal(int32(32), ft.Int32)
require.Equal(int64(64), ft.Int64)
ft = client.FieldType.Create().
SetInt(1).
SetInt8(math.MinInt8).
SetInt16(math.MinInt16).
SetInt32(math.MinInt16).
SetInt64(math.MinInt16).
SetOptionalInt8(math.MinInt8).
SetOptionalInt16(math.MinInt16).
SetOptionalInt32(math.MinInt32).
SetOptionalInt64(math.MinInt64).
SetNillableInt8(math.MinInt8).
SetNillableInt16(math.MinInt16).
SetNillableInt32(math.MinInt32).
SetNillableInt64(math.MinInt64).
SetDir("dir").
SetNdir("ndir").
SetNullStr(sql.NullString{String: "not-default", Valid: true}).
SetLink(schema.Link{URL: link}).
SetLinkOther(schema.Link{URL: link}).
SetNullLink(schema.Link{URL: link}).
SetRole(role.Admin).
SetDuration(time.Hour).
SaveX(ctx)
require.Equal(int8(math.MinInt8), ft.OptionalInt8)
require.Equal(int16(math.MinInt16), ft.OptionalInt16)
require.Equal(int32(math.MinInt32), ft.OptionalInt32)
require.Equal(int64(math.MinInt64), ft.OptionalInt64)
require.Equal(int8(math.MinInt8), *ft.NillableInt8)
require.Equal(int16(math.MinInt16), *ft.NillableInt16)
require.Equal(int32(math.MinInt32), *ft.NillableInt32)
require.Equal(int64(math.MinInt64), *ft.NillableInt64)
require.Equal(http.Dir("dir"), ft.Dir)
require.NotNil(*ft.Ndir)
require.Equal(http.Dir("ndir"), *ft.Ndir)
require.Equal("default", ft.Str.String)
require.Equal("not-default", ft.NullStr.String)
require.Equal("localhost", ft.Link.String())
require.Equal("localhost", ft.LinkOther.String())
require.Equal("localhost", ft.NullLink.String())
require.Equal(net.IP("127.0.0.1").String(), ft.IP.String())
mac, err := net.ParseMAC("3b:b3:6b:3c:10:79")
require.NoError(err)
dt, err := time.Parse(time.RFC3339, "1906-01-02T00:00:00+00:00")
require.NoError(err)
ft = ft.Update().
SetInt(1).
SetInt8(math.MaxInt8).
SetInt16(math.MaxInt16).
SetInt32(math.MaxInt16).
SetInt64(math.MaxInt16).
SetOptionalInt8(math.MaxInt8).
SetOptionalInt16(math.MaxInt16).
SetOptionalInt32(math.MaxInt32).
SetOptionalInt64(math.MaxInt64).
SetNillableInt8(math.MaxInt8).
SetNillableInt16(math.MaxInt16).
SetNillableInt32(math.MaxInt32).
SetNillableInt64(math.MaxInt64).
SetDatetime(dt).
SetDecimal(10.20).
SetDir("dir").
SetNdir("ndir").
SetStr(sql.NullString{String: "str", Valid: true}).
SetNullStr(sql.NullString{String: "str", Valid: true}).
SetLink(schema.Link{URL: link}).
SetNullLink(schema.Link{URL: link}).
SetLinkOther(schema.Link{URL: link}).
SetSchemaInt(64).
SetSchemaInt8(8).
SetSchemaInt64(64).
SetMAC(schema.MAC{HardwareAddr: mac}).
SaveX(ctx)
require.Equal(int8(math.MaxInt8), ft.OptionalInt8)
require.Equal(int16(math.MaxInt16), ft.OptionalInt16)
require.Equal(int32(math.MaxInt32), ft.OptionalInt32)
require.Equal(int64(math.MaxInt64), ft.OptionalInt64)
require.Equal(int8(math.MaxInt8), *ft.NillableInt8)
require.Equal(int16(math.MaxInt16), *ft.NillableInt16)
require.Equal(int32(math.MaxInt32), *ft.NillableInt32)
require.Equal(int64(math.MaxInt64), *ft.NillableInt64)
require.Equal(10.20, ft.Decimal)
require.True(dt.Equal(ft.Datetime))
require.Equal(http.Dir("dir"), ft.Dir)
require.NotNil(*ft.Ndir)
require.Equal(http.Dir("ndir"), *ft.Ndir)
require.Equal("str", ft.Str.String)
require.Equal("str", ft.NullStr.String)
require.Equal("localhost", ft.Link.String())
require.Equal("localhost", ft.LinkOther.String())
require.Equal("localhost", ft.NullLink.String())
require.Equal(schema.Int(64), ft.SchemaInt)
require.Equal(schema.Int8(8), ft.SchemaInt8)
require.Equal(schema.Int64(64), ft.SchemaInt64)
require.Equal(mac.String(), ft.MAC.String())
exists, err := client.FieldType.Query().Where(fieldtype.DurationLT(time.Hour * 2)).Exist(ctx)
require.NoError(err)
require.True(exists)
exists, err = client.FieldType.Query().Where(fieldtype.DurationLT(time.Hour)).Exist(ctx)
require.NoError(err)
require.False(exists)
_, err = client.Task.CreateBulk(
client.Task.Create().SetPriority(schema.PriorityLow),
client.Task.Create().SetPriority(schema.PriorityMid),
client.Task.Create().SetPriority(schema.PriorityHigh),
).Save(ctx)
require.NoError(err)
_, err = client.Task.Create().SetPriority(schema.Priority(10)).Save(ctx)
require.Error(err)
tasks := client.Task.Query().Order(ent.Asc(task.FieldPriority)).AllX(ctx)
require.Equal(schema.PriorityLow, tasks[0].Priority)
require.Equal(schema.PriorityMid, tasks[1].Priority)
require.Equal(schema.PriorityHigh, tasks[2].Priority)
tasks = client.Task.Query().Order(ent.Desc(task.FieldPriority)).AllX(ctx)
require.Equal(schema.PriorityLow, tasks[2].Priority)
require.Equal(schema.PriorityMid, tasks[1].Priority)
require.Equal(schema.PriorityHigh, tasks[0].Priority)
}
| Types |
haml.js | !(function(){var ace = window.___ace___;
ace.define("ace/snippets/haml",["require","exports","module"], function(require, exports, module) {
"use strict";
exports.snippetText = "snippet t\n\
%table\n\
%tr\n\
%th\n\
${1:headers}\n\
%tr\n\
%td\n\
${2:headers}\n\
snippet ul\n\
%ul\n\
%li\n\
${1:item}\n\
%li\n\
snippet =rp\n\
= render :partial => '${1:partial}'\n\
snippet =rpl\n\
= render :partial => '${1:partial}', :locals => {}\n\
snippet =rpc\n\
= render :partial => '${1:partial}', :collection => @$1\n\
\n\
";
exports.scope = "haml";
}); | })(); | |
views.py | from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.shortcuts import render
from guardian.decorators import permission_required
from apps.dashboard.tools import get_base_context, has_access
@login_required
@permission_required('hobbygroups.change_hobby', return_403=True)
def | (request):
if not has_access(request):
raise PermissionDenied
context = get_base_context(request)
return render(request, 'hobbygroups/dashboard/index.html', context)
| index |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.