hexsha
stringlengths 40
40
| size
int64 4
1.05M
| content
stringlengths 4
1.05M
| avg_line_length
float64 1.33
100
| max_line_length
int64 1
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
6990407ab0a9fa6e2bcd9e09953b2b76f37ede3f | 4,161 | #[allow(dead_code)]
mod util;
use crate::util::event::{Event, Events};
use std::{error::Error, io};
use termion::{event::Key, input::MouseTerminal, raw::IntoRawMode, screen::AlternateScreen};
use tui::{
backend::TermionBackend,
layout::{Constraint, Layout},
style::{Color, Modifier, Style},
widgets::{Block, Borders, Row, Table, TableState},
Terminal,
};
/// Table row data paired with the tui selection state needed to render it
/// as a stateful widget and move the highlighted row with the arrow keys.
pub struct StatefulTable<'a> {
    // Selection cursor managed by tui (starts from `TableState::default()`).
    state: TableState,
    // Row-major cell text; borrowed for 'a so the table owns no strings.
    items: Vec<Vec<&'a str>>,
}
impl<'a> StatefulTable<'a> {
    /// Builds the demo table: 19 hard-coded rows and no initial selection.
    fn new() -> StatefulTable<'a> {
        StatefulTable {
            state: TableState::default(),
            items: vec![
                vec!["Row11", "Row12", "Row13"],
                vec!["Row21", "Row22", "Row23"],
                vec!["Row31", "Row32", "Row33"],
                vec!["Row41", "Row42", "Row43"],
                vec!["Row51", "Row52", "Row53"],
                vec!["Row61", "Row62", "Row63"],
                vec!["Row71", "Row72", "Row73"],
                vec!["Row81", "Row82", "Row83"],
                vec!["Row91", "Row92", "Row93"],
                vec!["Row101", "Row102", "Row103"],
                vec!["Row111", "Row112", "Row113"],
                vec!["Row121", "Row122", "Row123"],
                vec!["Row131", "Row132", "Row133"],
                vec!["Row141", "Row142", "Row143"],
                vec!["Row151", "Row152", "Row153"],
                vec!["Row161", "Row162", "Row163"],
                vec!["Row171", "Row172", "Row173"],
                vec!["Row181", "Row182", "Row183"],
                vec!["Row191", "Row192", "Row193"],
            ],
        }
    }
    /// Moves the selection one row down, wrapping from the last row back to
    /// the first. Selects the first row when nothing is selected yet.
    pub fn next(&mut self) {
        if self.items.is_empty() {
            // Guard: `items.len() - 1` below would underflow (panic) on an
            // empty table; clear the selection instead.
            self.state.select(None);
            return;
        }
        let i = match self.state.selected() {
            // Wrap around once the last row is reached.
            Some(i) if i >= self.items.len() - 1 => 0,
            Some(i) => i + 1,
            None => 0,
        };
        self.state.select(Some(i));
    }
    /// Moves the selection one row up, wrapping from the first row to the
    /// last. Selects the first row when nothing is selected yet.
    pub fn previous(&mut self) {
        if self.items.is_empty() {
            // Same underflow guard as in `next`.
            self.state.select(None);
            return;
        }
        let i = match self.state.selected() {
            // Wrap around past the top of the table.
            Some(0) => self.items.len() - 1,
            Some(i) => i - 1,
            None => 0,
        };
        self.state.select(Some(i));
    }
}
/// Entry point: sets up a raw-mode alternate-screen terminal, then runs a
/// draw/input loop rendering a stateful table until the user presses 'q'.
fn main() -> Result<(), Box<dyn Error>> {
    // Terminal initialization: raw mode, mouse support, and an alternate
    // screen so the user's shell scrollback is restored on exit.
    let stdout = io::stdout().into_raw_mode()?;
    let stdout = MouseTerminal::from(stdout);
    let stdout = AlternateScreen::from(stdout);
    let backend = TermionBackend::new(stdout);
    let mut terminal = Terminal::new(backend)?;
    terminal.hide_cursor()?;
    let events = Events::new();
    let mut table = StatefulTable::new();
    // Main loop: redraw the whole UI each iteration, then block on input.
    loop {
        terminal.draw(|mut f| {
            // Single full-width region with a 5-cell margin on every side.
            let rects = Layout::default()
                .constraints([Constraint::Percentage(100)].as_ref())
                .margin(5)
                .split(f.size());
            let selected_style = Style::default().fg(Color::Yellow).modifier(Modifier::BOLD);
            let normal_style = Style::default().fg(Color::White);
            let header = ["Header1", "Header2", "Header3"];
            // Every data row uses the normal style; the selected row gets
            // `selected_style` applied via `highlight_style` below.
            let rows = table
                .items
                .iter()
                .map(|i| Row::StyledData(i.iter(), normal_style));
            let t = Table::new(header.iter(), rows)
                .block(Block::default().borders(Borders::ALL).title("Table"))
                .highlight_style(selected_style)
                .highlight_symbol(">> ")
                // Mixed width constraints: percentage, fixed, and capped.
                .widths(&[
                    Constraint::Percentage(50),
                    Constraint::Length(30),
                    Constraint::Max(10),
                ]);
            // Stateful render keeps the selection/scroll offset in
            // `table.state` across frames.
            f.render_stateful_widget(t, rects[0], &mut table.state);
        })?;
        // Input handling: 'q' quits, arrow keys move the selection.
        if let Event::Input(key) = events.next()? {
            match key {
                Key::Char('q') => {
                    break;
                }
                Key::Down => {
                    table.next();
                }
                Key::Up => {
                    table.previous();
                }
                _ => {}
            }
        };
    }
    Ok(())
}
| 31.052239 | 93 | 0.437155 |
282f9157bf9396cab1f98345cba2debab720d152 | 1,302 | use crate::client::conn::ClientConnData;
use crate::client::conn::ClientStream;
use crate::client::conn::ClientStreamData;
use crate::client::conn::ClientToWriteMessage;
use crate::client::stream_handler::ClientResponseStreamHandlerHolder;
use crate::common::client_or_server::ClientOrServer;
use crate::common::types::Types;
use crate::net::socket::SocketStream;
use crate::req_resp::RequestOrResponse;
use crate::solicit::frame::SettingsFrame;
use crate::solicit_async::client_handshake;
use std::future::Future;
use std::pin::Pin;
/// Zero-sized marker selecting the client-side specialization of the
/// generic connection machinery (see the [`Types`] trait).
#[derive(Clone, Default)]
pub struct ClientTypes;

impl Types for ClientTypes {
    // Concrete client-side types plugged into the shared conn/stream code.
    type HttpStreamData = ClientStream;
    type HttpStreamSpecific = ClientStreamData;
    type SideSpecific = ClientConnData;
    type StreamHandlerHolder = ClientResponseStreamHandlerHolder;
    type ToWriteMessage = ClientToWriteMessage;

    const CLIENT_OR_SERVER: ClientOrServer = ClientOrServer::Client;
    // Outgoing messages from a client are always requests.
    const OUT_REQUEST_OR_RESPONSE: RequestOrResponse = RequestOrResponse::Request;
    // NOTE(review): presumably a diagnostic-context/log label — confirm usage.
    const CONN_NDC: &'static str = "client conn";

    /// Client half of the connection handshake; delegates to
    /// `client_handshake`, forwarding the initial SETTINGS frame.
    fn handshake<'a, I: SocketStream>(
        conn: &'a mut I,
        settings_frame: SettingsFrame,
    ) -> Pin<Box<dyn Future<Output = crate::Result<()>> + Send + 'a>> {
        Box::pin(client_handshake(conn, settings_frame))
    }
}
| 35.189189 | 82 | 0.746544 |
5b9dd69af44e919a57686f4e542ebaff52096419 | 239,183 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Shared state behind every `Client`: the underlying smithy service client
/// plus the service configuration. Wrapped in an `Arc` so `Client` clones
/// are cheap and all share one connection pool/config.
#[derive(Debug)]
pub(crate) struct Handle<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    client: aws_smithy_client::Client<C, M, R>,
    conf: crate::Config,
}
/// Client for Amazon CloudWatch Logs
///
/// Client for invoking operations on Amazon CloudWatch Logs. Each operation on Amazon CloudWatch Logs is a method on
/// this struct. `.send()` MUST be invoked on the generated operations to dispatch the request to the service.
///
/// # Examples
/// **Constructing a client and invoking an operation**
/// ```rust,no_run
/// # async fn docs() {
/// // create a shared configuration. This can be used & shared between multiple service clients.
/// let shared_config = aws_config::load_from_env().await;
/// let client = aws_sdk_cloudwatchlogs::Client::new(&shared_config);
/// // invoke an operation
/// /* let rsp = client
///     .<operationname>().
///     .<param>("some value")
///     .send().await; */
/// # }
/// ```
/// **Constructing a client with custom configuration**
/// ```rust,no_run
/// use aws_config::RetryConfig;
/// # async fn docs() {
/// let shared_config = aws_config::load_from_env().await;
/// let config = aws_sdk_cloudwatchlogs::config::Builder::from(&shared_config)
///     .retry_config(RetryConfig::disabled())
///     .build();
/// let client = aws_sdk_cloudwatchlogs::Client::from_conf(config);
/// # }
/// ```
#[derive(std::fmt::Debug)]
pub struct Client<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // All clones share this handle (service client + config).
    handle: std::sync::Arc<Handle<C, M, R>>,
}
impl<C, M, R> std::clone::Clone for Client<C, M, R> {
    /// Cheap clone: only bumps the reference count on the shared handle;
    /// the underlying service client and configuration are not duplicated.
    fn clone(&self) -> Self {
        let handle = std::sync::Arc::clone(&self.handle);
        Self { handle }
    }
}
#[doc(inline)]
pub use aws_smithy_client::Builder;
impl<C, M, R> From<aws_smithy_client::Client<C, M, R>> for Client<C, M, R> {
    /// Wraps a raw smithy client, pairing it with a default-built service
    /// configuration.
    fn from(client: aws_smithy_client::Client<C, M, R>) -> Self {
        let conf = crate::Config::builder().build();
        Self::with_config(client, conf)
    }
}
impl<C, M, R> Client<C, M, R> {
    /// Creates a client with the given service configuration.
    pub fn with_config(client: aws_smithy_client::Client<C, M, R>, conf: crate::Config) -> Self {
        let handle = Handle { client, conf };
        Self {
            handle: std::sync::Arc::new(handle),
        }
    }
    /// Returns the client's configuration.
    pub fn conf(&self) -> &crate::Config {
        &self.handle.conf
    }
}
impl<C, M, R> Client<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    // Generated code: one constructor per service operation. Each returns a
    // fluent builder holding an `Arc` clone of the shared handle; nothing is
    // sent until `.send()` is awaited on the builder.
    /// Constructs a fluent builder for the `AssociateKmsKey` operation.
    ///
    /// See [`AssociateKmsKey`](crate::client::fluent_builders::AssociateKmsKey) for more information about the
    /// operation and its arguments.
    pub fn associate_kms_key(&self) -> fluent_builders::AssociateKmsKey<C, M, R> {
        fluent_builders::AssociateKmsKey::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `CancelExportTask` operation.
    ///
    /// See [`CancelExportTask`](crate::client::fluent_builders::CancelExportTask) for more information about the
    /// operation and its arguments.
    pub fn cancel_export_task(&self) -> fluent_builders::CancelExportTask<C, M, R> {
        fluent_builders::CancelExportTask::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `CreateExportTask` operation.
    ///
    /// See [`CreateExportTask`](crate::client::fluent_builders::CreateExportTask) for more information about the
    /// operation and its arguments.
    pub fn create_export_task(&self) -> fluent_builders::CreateExportTask<C, M, R> {
        fluent_builders::CreateExportTask::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `CreateLogGroup` operation.
    ///
    /// See [`CreateLogGroup`](crate::client::fluent_builders::CreateLogGroup) for more information about the
    /// operation and its arguments.
    pub fn create_log_group(&self) -> fluent_builders::CreateLogGroup<C, M, R> {
        fluent_builders::CreateLogGroup::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `CreateLogStream` operation.
    ///
    /// See [`CreateLogStream`](crate::client::fluent_builders::CreateLogStream) for more information about the
    /// operation and its arguments.
    pub fn create_log_stream(&self) -> fluent_builders::CreateLogStream<C, M, R> {
        fluent_builders::CreateLogStream::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DeleteDestination` operation.
    ///
    /// See [`DeleteDestination`](crate::client::fluent_builders::DeleteDestination) for more information about the
    /// operation and its arguments.
    pub fn delete_destination(&self) -> fluent_builders::DeleteDestination<C, M, R> {
        fluent_builders::DeleteDestination::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DeleteLogGroup` operation.
    ///
    /// See [`DeleteLogGroup`](crate::client::fluent_builders::DeleteLogGroup) for more information about the
    /// operation and its arguments.
    pub fn delete_log_group(&self) -> fluent_builders::DeleteLogGroup<C, M, R> {
        fluent_builders::DeleteLogGroup::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DeleteLogStream` operation.
    ///
    /// See [`DeleteLogStream`](crate::client::fluent_builders::DeleteLogStream) for more information about the
    /// operation and its arguments.
    pub fn delete_log_stream(&self) -> fluent_builders::DeleteLogStream<C, M, R> {
        fluent_builders::DeleteLogStream::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DeleteMetricFilter` operation.
    ///
    /// See [`DeleteMetricFilter`](crate::client::fluent_builders::DeleteMetricFilter) for more information about the
    /// operation and its arguments.
    pub fn delete_metric_filter(&self) -> fluent_builders::DeleteMetricFilter<C, M, R> {
        fluent_builders::DeleteMetricFilter::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DeleteQueryDefinition` operation.
    ///
    /// See [`DeleteQueryDefinition`](crate::client::fluent_builders::DeleteQueryDefinition) for more information about the
    /// operation and its arguments.
    pub fn delete_query_definition(&self) -> fluent_builders::DeleteQueryDefinition<C, M, R> {
        fluent_builders::DeleteQueryDefinition::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DeleteResourcePolicy` operation.
    ///
    /// See [`DeleteResourcePolicy`](crate::client::fluent_builders::DeleteResourcePolicy) for more information about the
    /// operation and its arguments.
    pub fn delete_resource_policy(&self) -> fluent_builders::DeleteResourcePolicy<C, M, R> {
        fluent_builders::DeleteResourcePolicy::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DeleteRetentionPolicy` operation.
    ///
    /// See [`DeleteRetentionPolicy`](crate::client::fluent_builders::DeleteRetentionPolicy) for more information about the
    /// operation and its arguments.
    pub fn delete_retention_policy(&self) -> fluent_builders::DeleteRetentionPolicy<C, M, R> {
        fluent_builders::DeleteRetentionPolicy::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DeleteSubscriptionFilter` operation.
    ///
    /// See [`DeleteSubscriptionFilter`](crate::client::fluent_builders::DeleteSubscriptionFilter) for more information about the
    /// operation and its arguments.
    pub fn delete_subscription_filter(&self) -> fluent_builders::DeleteSubscriptionFilter<C, M, R> {
        fluent_builders::DeleteSubscriptionFilter::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeDestinations` operation.
    ///
    /// See [`DescribeDestinations`](crate::client::fluent_builders::DescribeDestinations) for more information about the
    /// operation and its arguments.
    pub fn describe_destinations(&self) -> fluent_builders::DescribeDestinations<C, M, R> {
        fluent_builders::DescribeDestinations::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeExportTasks` operation.
    ///
    /// See [`DescribeExportTasks`](crate::client::fluent_builders::DescribeExportTasks) for more information about the
    /// operation and its arguments.
    pub fn describe_export_tasks(&self) -> fluent_builders::DescribeExportTasks<C, M, R> {
        fluent_builders::DescribeExportTasks::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeLogGroups` operation.
    ///
    /// See [`DescribeLogGroups`](crate::client::fluent_builders::DescribeLogGroups) for more information about the
    /// operation and its arguments.
    pub fn describe_log_groups(&self) -> fluent_builders::DescribeLogGroups<C, M, R> {
        fluent_builders::DescribeLogGroups::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeLogStreams` operation.
    ///
    /// See [`DescribeLogStreams`](crate::client::fluent_builders::DescribeLogStreams) for more information about the
    /// operation and its arguments.
    pub fn describe_log_streams(&self) -> fluent_builders::DescribeLogStreams<C, M, R> {
        fluent_builders::DescribeLogStreams::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeMetricFilters` operation.
    ///
    /// See [`DescribeMetricFilters`](crate::client::fluent_builders::DescribeMetricFilters) for more information about the
    /// operation and its arguments.
    pub fn describe_metric_filters(&self) -> fluent_builders::DescribeMetricFilters<C, M, R> {
        fluent_builders::DescribeMetricFilters::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeQueries` operation.
    ///
    /// See [`DescribeQueries`](crate::client::fluent_builders::DescribeQueries) for more information about the
    /// operation and its arguments.
    pub fn describe_queries(&self) -> fluent_builders::DescribeQueries<C, M, R> {
        fluent_builders::DescribeQueries::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeQueryDefinitions` operation.
    ///
    /// See [`DescribeQueryDefinitions`](crate::client::fluent_builders::DescribeQueryDefinitions) for more information about the
    /// operation and its arguments.
    pub fn describe_query_definitions(&self) -> fluent_builders::DescribeQueryDefinitions<C, M, R> {
        fluent_builders::DescribeQueryDefinitions::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeResourcePolicies` operation.
    ///
    /// See [`DescribeResourcePolicies`](crate::client::fluent_builders::DescribeResourcePolicies) for more information about the
    /// operation and its arguments.
    pub fn describe_resource_policies(&self) -> fluent_builders::DescribeResourcePolicies<C, M, R> {
        fluent_builders::DescribeResourcePolicies::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeSubscriptionFilters` operation.
    ///
    /// See [`DescribeSubscriptionFilters`](crate::client::fluent_builders::DescribeSubscriptionFilters) for more information about the
    /// operation and its arguments.
    pub fn describe_subscription_filters(
        &self,
    ) -> fluent_builders::DescribeSubscriptionFilters<C, M, R> {
        fluent_builders::DescribeSubscriptionFilters::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DisassociateKmsKey` operation.
    ///
    /// See [`DisassociateKmsKey`](crate::client::fluent_builders::DisassociateKmsKey) for more information about the
    /// operation and its arguments.
    pub fn disassociate_kms_key(&self) -> fluent_builders::DisassociateKmsKey<C, M, R> {
        fluent_builders::DisassociateKmsKey::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `FilterLogEvents` operation.
    ///
    /// See [`FilterLogEvents`](crate::client::fluent_builders::FilterLogEvents) for more information about the
    /// operation and its arguments.
    pub fn filter_log_events(&self) -> fluent_builders::FilterLogEvents<C, M, R> {
        fluent_builders::FilterLogEvents::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `GetLogEvents` operation.
    ///
    /// See [`GetLogEvents`](crate::client::fluent_builders::GetLogEvents) for more information about the
    /// operation and its arguments.
    pub fn get_log_events(&self) -> fluent_builders::GetLogEvents<C, M, R> {
        fluent_builders::GetLogEvents::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `GetLogGroupFields` operation.
    ///
    /// See [`GetLogGroupFields`](crate::client::fluent_builders::GetLogGroupFields) for more information about the
    /// operation and its arguments.
    pub fn get_log_group_fields(&self) -> fluent_builders::GetLogGroupFields<C, M, R> {
        fluent_builders::GetLogGroupFields::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `GetLogRecord` operation.
    ///
    /// See [`GetLogRecord`](crate::client::fluent_builders::GetLogRecord) for more information about the
    /// operation and its arguments.
    pub fn get_log_record(&self) -> fluent_builders::GetLogRecord<C, M, R> {
        fluent_builders::GetLogRecord::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `GetQueryResults` operation.
    ///
    /// See [`GetQueryResults`](crate::client::fluent_builders::GetQueryResults) for more information about the
    /// operation and its arguments.
    pub fn get_query_results(&self) -> fluent_builders::GetQueryResults<C, M, R> {
        fluent_builders::GetQueryResults::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `ListTagsLogGroup` operation.
    ///
    /// See [`ListTagsLogGroup`](crate::client::fluent_builders::ListTagsLogGroup) for more information about the
    /// operation and its arguments.
    pub fn list_tags_log_group(&self) -> fluent_builders::ListTagsLogGroup<C, M, R> {
        fluent_builders::ListTagsLogGroup::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `PutDestination` operation.
    ///
    /// See [`PutDestination`](crate::client::fluent_builders::PutDestination) for more information about the
    /// operation and its arguments.
    pub fn put_destination(&self) -> fluent_builders::PutDestination<C, M, R> {
        fluent_builders::PutDestination::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `PutDestinationPolicy` operation.
    ///
    /// See [`PutDestinationPolicy`](crate::client::fluent_builders::PutDestinationPolicy) for more information about the
    /// operation and its arguments.
    pub fn put_destination_policy(&self) -> fluent_builders::PutDestinationPolicy<C, M, R> {
        fluent_builders::PutDestinationPolicy::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `PutLogEvents` operation.
    ///
    /// See [`PutLogEvents`](crate::client::fluent_builders::PutLogEvents) for more information about the
    /// operation and its arguments.
    pub fn put_log_events(&self) -> fluent_builders::PutLogEvents<C, M, R> {
        fluent_builders::PutLogEvents::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `PutMetricFilter` operation.
    ///
    /// See [`PutMetricFilter`](crate::client::fluent_builders::PutMetricFilter) for more information about the
    /// operation and its arguments.
    pub fn put_metric_filter(&self) -> fluent_builders::PutMetricFilter<C, M, R> {
        fluent_builders::PutMetricFilter::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `PutQueryDefinition` operation.
    ///
    /// See [`PutQueryDefinition`](crate::client::fluent_builders::PutQueryDefinition) for more information about the
    /// operation and its arguments.
    pub fn put_query_definition(&self) -> fluent_builders::PutQueryDefinition<C, M, R> {
        fluent_builders::PutQueryDefinition::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `PutResourcePolicy` operation.
    ///
    /// See [`PutResourcePolicy`](crate::client::fluent_builders::PutResourcePolicy) for more information about the
    /// operation and its arguments.
    pub fn put_resource_policy(&self) -> fluent_builders::PutResourcePolicy<C, M, R> {
        fluent_builders::PutResourcePolicy::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `PutRetentionPolicy` operation.
    ///
    /// See [`PutRetentionPolicy`](crate::client::fluent_builders::PutRetentionPolicy) for more information about the
    /// operation and its arguments.
    pub fn put_retention_policy(&self) -> fluent_builders::PutRetentionPolicy<C, M, R> {
        fluent_builders::PutRetentionPolicy::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `PutSubscriptionFilter` operation.
    ///
    /// See [`PutSubscriptionFilter`](crate::client::fluent_builders::PutSubscriptionFilter) for more information about the
    /// operation and its arguments.
    pub fn put_subscription_filter(&self) -> fluent_builders::PutSubscriptionFilter<C, M, R> {
        fluent_builders::PutSubscriptionFilter::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `StartQuery` operation.
    ///
    /// See [`StartQuery`](crate::client::fluent_builders::StartQuery) for more information about the
    /// operation and its arguments.
    pub fn start_query(&self) -> fluent_builders::StartQuery<C, M, R> {
        fluent_builders::StartQuery::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `StopQuery` operation.
    ///
    /// See [`StopQuery`](crate::client::fluent_builders::StopQuery) for more information about the
    /// operation and its arguments.
    pub fn stop_query(&self) -> fluent_builders::StopQuery<C, M, R> {
        fluent_builders::StopQuery::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `TagLogGroup` operation.
    ///
    /// See [`TagLogGroup`](crate::client::fluent_builders::TagLogGroup) for more information about the
    /// operation and its arguments.
    pub fn tag_log_group(&self) -> fluent_builders::TagLogGroup<C, M, R> {
        fluent_builders::TagLogGroup::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `TestMetricFilter` operation.
    ///
    /// See [`TestMetricFilter`](crate::client::fluent_builders::TestMetricFilter) for more information about the
    /// operation and its arguments.
    pub fn test_metric_filter(&self) -> fluent_builders::TestMetricFilter<C, M, R> {
        fluent_builders::TestMetricFilter::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `UntagLogGroup` operation.
    ///
    /// See [`UntagLogGroup`](crate::client::fluent_builders::UntagLogGroup) for more information about the
    /// operation and its arguments.
    pub fn untag_log_group(&self) -> fluent_builders::UntagLogGroup<C, M, R> {
        fluent_builders::UntagLogGroup::new(self.handle.clone())
    }
}
pub mod fluent_builders {
//!
//! Utilities to ergonomically construct a request to the service.
//!
//! Fluent builders are created through the [`Client`](crate::client::Client) by calling
//! one if its operation methods. After parameters are set using the builder methods,
//! the `send` method can be called to initiate the request.
//!
    /// Fluent builder constructing a request to `AssociateKmsKey`.
    ///
    /// <p>Associates the specified Key Management Service customer master key (CMK) with the specified log group.</p>
    /// <p>Associating an KMS CMK with a log group overrides any existing associations between the log group and a CMK.
    /// After a CMK is associated with a log group, all newly ingested data for the log group is encrypted using the CMK.
    /// This association is stored as long as the data encrypted with the CMK is still within CloudWatch Logs.
    /// This enables CloudWatch Logs to decrypt this data whenever it is requested.</p>
    /// <important>
    /// <p>CloudWatch Logs supports only symmetric CMKs. Do not associate an asymmetric CMK
    /// with your log group. For more information, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using Symmetric and Asymmetric
    /// Keys</a>.</p>
    /// </important>
    /// <p>It can take up to 5 minutes for this operation to take effect.</p>
    /// <p>If you attempt to associate a CMK with a log group but the CMK does not exist or the
    /// CMK is disabled, you receive an <code>InvalidParameterException</code> error. </p>
    #[derive(std::fmt::Debug)]
    pub struct AssociateKmsKey<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        // Shared service client + configuration.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        // Accumulates the operation's input parameters until `send()`.
        inner: crate::input::associate_kms_key_input::Builder,
    }
    impl<C, M, R> AssociateKmsKey<C, M, R>
    where
        C: aws_smithy_client::bounds::SmithyConnector,
        M: aws_smithy_client::bounds::SmithyMiddleware<C>,
        R: aws_smithy_client::retry::NewRequestPolicy,
    {
        /// Creates a new `AssociateKmsKey`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }
        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::AssociateKmsKeyOutput,
            aws_smithy_http::result::SdkError<crate::error::AssociateKmsKeyError>,
        >
        where
            R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
                crate::input::AssociateKmsKeyInputOperationOutputAlias,
                crate::output::AssociateKmsKeyOutput,
                crate::error::AssociateKmsKeyError,
                crate::input::AssociateKmsKeyInputOperationRetryAlias,
            >,
        {
            // Finalize the accumulated input; builder errors surface as a
            // construction failure, not a service error.
            let input = self.inner.build().map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
            // Turn the input into a configured operation, then dispatch it
            // through the shared client.
            let op = input
                .make_operation(&self.handle.conf)
                .await
                .map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                })?;
            self.handle.client.call(op).await
        }
        /// <p>The name of the log group.</p>
        pub fn log_group_name(mut self, inp: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.log_group_name(inp);
            self
        }
        /// <p>The name of the log group.</p>
        pub fn set_log_group_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_log_group_name(input);
            self
        }
        /// <p>The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. This must be a symmetric CMK.
        /// For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms">Amazon Resource Names - Key Management Service</a> and <a href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using Symmetric and Asymmetric Keys</a>.</p>
        pub fn kms_key_id(mut self, inp: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.kms_key_id(inp);
            self
        }
        /// <p>The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. This must be a symmetric CMK.
        /// For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms">Amazon Resource Names - Key Management Service</a> and <a href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using Symmetric and Asymmetric Keys</a>.</p>
        pub fn set_kms_key_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_kms_key_id(input);
            self
        }
    }
    /// Fluent builder constructing a request to `CancelExportTask`.
    ///
    /// <p>Cancels the specified export task.</p>
    /// <p>The task must be in the <code>PENDING</code> or <code>RUNNING</code> state.</p>
    #[derive(std::fmt::Debug)]
    pub struct CancelExportTask<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        // Shared service client + configuration.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        // Accumulates the operation's input parameters until `send()`.
        inner: crate::input::cancel_export_task_input::Builder,
    }
    impl<C, M, R> CancelExportTask<C, M, R>
    where
        C: aws_smithy_client::bounds::SmithyConnector,
        M: aws_smithy_client::bounds::SmithyMiddleware<C>,
        R: aws_smithy_client::retry::NewRequestPolicy,
    {
        /// Creates a new `CancelExportTask`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }
        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::CancelExportTaskOutput,
            aws_smithy_http::result::SdkError<crate::error::CancelExportTaskError>,
        >
        where
            R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
                crate::input::CancelExportTaskInputOperationOutputAlias,
                crate::output::CancelExportTaskOutput,
                crate::error::CancelExportTaskError,
                crate::input::CancelExportTaskInputOperationRetryAlias,
            >,
        {
            // Finalize the accumulated input; builder errors surface as a
            // construction failure, not a service error.
            let input = self.inner.build().map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
            // Turn the input into a configured operation, then dispatch it
            // through the shared client.
            let op = input
                .make_operation(&self.handle.conf)
                .await
                .map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                })?;
            self.handle.client.call(op).await
        }
        /// <p>The ID of the export task.</p>
        pub fn task_id(mut self, inp: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.task_id(inp);
            self
        }
        /// <p>The ID of the export task.</p>
        pub fn set_task_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_task_id(input);
            self
        }
    }
    /// Fluent builder constructing a request to `CreateExportTask`.
    ///
    /// <p>Creates an export task, which allows you to efficiently export data from a
    /// log group to an Amazon S3 bucket. When you perform a <code>CreateExportTask</code>
    /// operation, you must use credentials that have permission to write to the S3 bucket
    /// that you specify as the destination.</p>
    /// <p>This is an asynchronous call. If all the required information is provided, this
    /// operation initiates an export task and responds with the ID of the task. After the task has started,
    /// you can use <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeExportTasks.html">DescribeExportTasks</a> to get the status of the export task. Each account can
    /// only have one active (<code>RUNNING</code> or <code>PENDING</code>) export task at a time.
    /// To cancel an export task, use <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CancelExportTask.html">CancelExportTask</a>.</p>
    /// <p>You can export logs from multiple log groups or multiple time ranges to the same S3
    /// bucket. To separate out log data for each export task, you can specify a prefix to be used as
    /// the Amazon S3 key prefix for all exported objects.</p>
    /// <p>Exporting to S3 buckets that are encrypted with AES-256 is supported. Exporting to S3 buckets
    /// encrypted with SSE-KMS is not supported. </p>
    #[derive(std::fmt::Debug)]
    pub struct CreateExportTask<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        // Shared service client + configuration.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        // Accumulates the operation's input parameters until `send()`.
        inner: crate::input::create_export_task_input::Builder,
    }
impl<C, M, R> CreateExportTask<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `CreateExportTask`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateExportTaskOutput,
        aws_smithy_http::result::SdkError<crate::error::CreateExportTaskError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::CreateExportTaskInputOperationOutputAlias,
            crate::output::CreateExportTaskOutput,
            crate::error::CreateExportTaskError,
            crate::input::CreateExportTaskInputOperationRetryAlias,
        >,
    {
        // Input assembly/serialization failures surface as `ConstructionFailure`
        // before any request is sent over the wire.
        let input = self.inner.build().map_err(|err| {
            aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
        })?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The name of the export task.</p>
    pub fn task_name(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.task_name(inp);
        self
    }
    /// <p>The name of the export task.</p>
    pub fn set_task_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_task_name(input);
        self
    }
    /// <p>The name of the log group.</p>
    pub fn log_group_name(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.log_group_name(inp);
        self
    }
    /// <p>The name of the log group.</p>
    pub fn set_log_group_name(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_log_group_name(input);
        self
    }
    /// <p>Export only log streams that match the provided prefix. If you don't
    /// specify a value, no prefix filter is applied.</p>
    pub fn log_stream_name_prefix(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.log_stream_name_prefix(inp);
        self
    }
    /// <p>Export only log streams that match the provided prefix. If you don't
    /// specify a value, no prefix filter is applied.</p>
    pub fn set_log_stream_name_prefix(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_log_stream_name_prefix(input);
        self
    }
    /// <p>The start time of the range for the request, expressed as the number of milliseconds
    /// after Jan 1, 1970 00:00:00 UTC. Events with a timestamp earlier than this time are not
    /// exported.</p>
    pub fn from(mut self, inp: i64) -> Self {
        self.inner = self.inner.from(inp);
        self
    }
    /// <p>The start time of the range for the request, expressed as the number of milliseconds
    /// after Jan 1, 1970 00:00:00 UTC. Events with a timestamp earlier than this time are not
    /// exported.</p>
    pub fn set_from(mut self, input: std::option::Option<i64>) -> Self {
        self.inner = self.inner.set_from(input);
        self
    }
    /// <p>The end time of the range for the request, expressed as the number of milliseconds
    /// after Jan 1, 1970 00:00:00 UTC. Events with a timestamp later than this time are not
    /// exported.</p>
    pub fn to(mut self, inp: i64) -> Self {
        self.inner = self.inner.to(inp);
        self
    }
    /// <p>The end time of the range for the request, expressed as the number of milliseconds
    /// after Jan 1, 1970 00:00:00 UTC. Events with a timestamp later than this time are not
    /// exported.</p>
    pub fn set_to(mut self, input: std::option::Option<i64>) -> Self {
        self.inner = self.inner.set_to(input);
        self
    }
    /// <p>The name of S3 bucket for the exported log data. The bucket must be in the same Amazon Web Services region.</p>
    pub fn destination(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.destination(inp);
        self
    }
    /// <p>The name of S3 bucket for the exported log data. The bucket must be in the same Amazon Web Services region.</p>
    pub fn set_destination(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_destination(input);
        self
    }
    /// <p>The prefix used as the start of the key for every object exported. If you don't
    /// specify a value, the default is <code>exportedlogs</code>.</p>
    pub fn destination_prefix(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.destination_prefix(inp);
        self
    }
    /// <p>The prefix used as the start of the key for every object exported. If you don't
    /// specify a value, the default is <code>exportedlogs</code>.</p>
    pub fn set_destination_prefix(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_destination_prefix(input);
        self
    }
}
/// Fluent builder constructing a request to `CreateLogGroup`.
///
/// <p>Creates a log group with the specified name. You can create up to 20,000 log groups per account.</p>
/// <p>You must use the following guidelines when naming a log group:</p>
/// <ul>
/// <li>
/// <p>Log group names must be unique within a region for an Amazon Web Services account.</p>
/// </li>
/// <li>
/// <p>Log group names can be between 1 and 512 characters long.</p>
/// </li>
/// <li>
/// <p>Log group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen),
/// '/' (forward slash), '.' (period), and '#' (number sign)</p>
/// </li>
/// </ul>
/// <p>When you create a log group, by default the log events in the log group never expire. To set
/// a retention policy so that events expire and are deleted after a specified time, use
/// <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutRetentionPolicy.html">PutRetentionPolicy</a>.</p>
/// <p>If you associate a Key Management Service customer master key (CMK) with the log group, ingested data is encrypted using the CMK.
/// This association is stored as long as the data encrypted with the CMK is still within CloudWatch Logs.
/// This enables CloudWatch Logs to decrypt this data whenever it is requested.</p>
/// <p>If you attempt to associate a CMK with the log group but the CMK does not exist or the
/// CMK is disabled, you receive an <code>InvalidParameterException</code> error. </p>
/// <important>
/// <p>CloudWatch Logs supports only symmetric CMKs. Do not associate an asymmetric CMK with
/// your log group. For more information, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using Symmetric and Asymmetric
/// Keys</a>.</p>
/// </important>
#[derive(std::fmt::Debug)]
pub struct CreateLogGroup<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state: `send()` reads `handle.conf` and dispatches via `handle.client`.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Request parameters accumulated by the fluent setter methods.
    inner: crate::input::create_log_group_input::Builder,
}
impl<C, M, R> CreateLogGroup<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `CreateLogGroup`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::create_log_group_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateLogGroupOutput,
        aws_smithy_http::result::SdkError<crate::error::CreateLogGroupError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::CreateLogGroupInputOperationOutputAlias,
            crate::output::CreateLogGroupOutput,
            crate::error::CreateLogGroupError,
            crate::input::CreateLogGroupInputOperationRetryAlias,
        >,
    {
        // Any failure to assemble or serialize the request is reported as a
        // `ConstructionFailure` before anything goes over the wire.
        let input = match self.inner.build() {
            Ok(input) => input,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The name of the log group.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.log_group_name(inp),
        }
    }
    /// <p>The name of the log group.</p>
    pub fn set_log_group_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_log_group_name(input),
        }
    }
    /// <p>The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
    /// For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms">Amazon Resource Names - Key Management Service</a>.</p>
    pub fn kms_key_id(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.kms_key_id(inp),
        }
    }
    /// <p>The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
    /// For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms">Amazon Resource Names - Key Management Service</a>.</p>
    pub fn set_kms_key_id(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_kms_key_id(input),
        }
    }
    /// Adds a key-value pair to `tags`.
    ///
    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
    ///
    /// <p>The key-value pairs to use for the tags.</p>
    /// <p>CloudWatch Logs doesn’t support IAM policies that prevent users from assigning specified tags to
    /// log groups using the <code>aws:Resource/<i>key-name</i>
    /// </code> or <code>aws:TagKeys</code> condition keys.
    /// For more information about using tags to control access, see
    /// <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html">Controlling access to Amazon Web Services resources using tags</a>.</p>
    pub fn tags(
        self,
        k: impl Into<std::string::String>,
        v: impl Into<std::string::String>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.tags(k, v),
        }
    }
    /// <p>The key-value pairs to use for the tags.</p>
    /// <p>CloudWatch Logs doesn’t support IAM policies that prevent users from assigning specified tags to
    /// log groups using the <code>aws:Resource/<i>key-name</i>
    /// </code> or <code>aws:TagKeys</code> condition keys.
    /// For more information about using tags to control access, see
    /// <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html">Controlling access to Amazon Web Services resources using tags</a>.</p>
    pub fn set_tags(
        self,
        input: std::option::Option<
            std::collections::HashMap<std::string::String, std::string::String>,
        >,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_tags(input),
        }
    }
}
/// Fluent builder constructing a request to `CreateLogStream`.
///
/// <p>Creates a log stream for the specified log group. A log stream is a sequence of log events
/// that originate from a single source, such as an application instance or a resource that is
/// being monitored.</p>
/// <p>There is no limit on the number of log streams that you can create for a log group. There is a limit
/// of 50 TPS on <code>CreateLogStream</code> operations, after which transactions are throttled.</p>
/// <p>You must use the following guidelines when naming a log stream:</p>
/// <ul>
/// <li>
/// <p>Log stream names must be unique within the log group.</p>
/// </li>
/// <li>
/// <p>Log stream names can be between 1 and 512 characters long.</p>
/// </li>
/// <li>
/// <p>The ':' (colon) and '*' (asterisk) characters are not allowed.</p>
/// </li>
/// </ul>
#[derive(std::fmt::Debug)]
pub struct CreateLogStream<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state: `send()` reads `handle.conf` and dispatches via `handle.client`.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Request parameters accumulated by the fluent setter methods.
    inner: crate::input::create_log_stream_input::Builder,
}
impl<C, M, R> CreateLogStream<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `CreateLogStream`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::create_log_stream_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateLogStreamOutput,
        aws_smithy_http::result::SdkError<crate::error::CreateLogStreamError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::CreateLogStreamInputOperationOutputAlias,
            crate::output::CreateLogStreamOutput,
            crate::error::CreateLogStreamError,
            crate::input::CreateLogStreamInputOperationRetryAlias,
        >,
    {
        // Any failure to assemble or serialize the request is reported as a
        // `ConstructionFailure` before anything goes over the wire.
        let input = match self.inner.build() {
            Ok(input) => input,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The name of the log group.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.log_group_name(inp),
        }
    }
    /// <p>The name of the log group.</p>
    pub fn set_log_group_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_log_group_name(input),
        }
    }
    /// <p>The name of the log stream.</p>
    pub fn log_stream_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.log_stream_name(inp),
        }
    }
    /// <p>The name of the log stream.</p>
    pub fn set_log_stream_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_log_stream_name(input),
        }
    }
}
/// Fluent builder constructing a request to `DeleteDestination`.
///
/// <p>Deletes the specified destination, and eventually disables all the
/// subscription filters that publish to it. This operation does not delete the
/// physical resource encapsulated by the destination.</p>
#[derive(std::fmt::Debug)]
pub struct DeleteDestination<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state: `send()` reads `handle.conf` and dispatches via `handle.client`.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Request parameters accumulated by the fluent setter methods.
    inner: crate::input::delete_destination_input::Builder,
}
impl<C, M, R> DeleteDestination<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DeleteDestination`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::delete_destination_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeleteDestinationOutput,
        aws_smithy_http::result::SdkError<crate::error::DeleteDestinationError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DeleteDestinationInputOperationOutputAlias,
            crate::output::DeleteDestinationOutput,
            crate::error::DeleteDestinationError,
            crate::input::DeleteDestinationInputOperationRetryAlias,
        >,
    {
        // Any failure to assemble or serialize the request is reported as a
        // `ConstructionFailure` before anything goes over the wire.
        let input = match self.inner.build() {
            Ok(input) => input,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The name of the destination.</p>
    pub fn destination_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.destination_name(inp),
        }
    }
    /// <p>The name of the destination.</p>
    pub fn set_destination_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_destination_name(input),
        }
    }
}
/// Fluent builder constructing a request to `DeleteLogGroup`.
///
/// <p>Deletes the specified log group and permanently deletes all the archived
/// log events associated with the log group.</p>
#[derive(std::fmt::Debug)]
pub struct DeleteLogGroup<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state: `send()` reads `handle.conf` and dispatches via `handle.client`.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Request parameters accumulated by the fluent setter methods.
    inner: crate::input::delete_log_group_input::Builder,
}
impl<C, M, R> DeleteLogGroup<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DeleteLogGroup`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::delete_log_group_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeleteLogGroupOutput,
        aws_smithy_http::result::SdkError<crate::error::DeleteLogGroupError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DeleteLogGroupInputOperationOutputAlias,
            crate::output::DeleteLogGroupOutput,
            crate::error::DeleteLogGroupError,
            crate::input::DeleteLogGroupInputOperationRetryAlias,
        >,
    {
        // Any failure to assemble or serialize the request is reported as a
        // `ConstructionFailure` before anything goes over the wire.
        let input = match self.inner.build() {
            Ok(input) => input,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The name of the log group.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.log_group_name(inp),
        }
    }
    /// <p>The name of the log group.</p>
    pub fn set_log_group_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_log_group_name(input),
        }
    }
}
/// Fluent builder constructing a request to `DeleteLogStream`.
///
/// <p>Deletes the specified log stream and permanently deletes all the archived log events associated
/// with the log stream.</p>
#[derive(std::fmt::Debug)]
pub struct DeleteLogStream<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state: `send()` reads `handle.conf` and dispatches via `handle.client`.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Request parameters accumulated by the fluent setter methods.
    inner: crate::input::delete_log_stream_input::Builder,
}
impl<C, M, R> DeleteLogStream<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DeleteLogStream`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::delete_log_stream_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeleteLogStreamOutput,
        aws_smithy_http::result::SdkError<crate::error::DeleteLogStreamError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DeleteLogStreamInputOperationOutputAlias,
            crate::output::DeleteLogStreamOutput,
            crate::error::DeleteLogStreamError,
            crate::input::DeleteLogStreamInputOperationRetryAlias,
        >,
    {
        // Any failure to assemble or serialize the request is reported as a
        // `ConstructionFailure` before anything goes over the wire.
        let input = match self.inner.build() {
            Ok(input) => input,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The name of the log group.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.log_group_name(inp),
        }
    }
    /// <p>The name of the log group.</p>
    pub fn set_log_group_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_log_group_name(input),
        }
    }
    /// <p>The name of the log stream.</p>
    pub fn log_stream_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.log_stream_name(inp),
        }
    }
    /// <p>The name of the log stream.</p>
    pub fn set_log_stream_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_log_stream_name(input),
        }
    }
}
/// Fluent builder constructing a request to `DeleteMetricFilter`.
///
/// <p>Deletes the specified metric filter.</p>
#[derive(std::fmt::Debug)]
pub struct DeleteMetricFilter<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state: `send()` reads `handle.conf` and dispatches via `handle.client`.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Request parameters accumulated by the fluent setter methods.
    inner: crate::input::delete_metric_filter_input::Builder,
}
impl<C, M, R> DeleteMetricFilter<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DeleteMetricFilter`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::delete_metric_filter_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeleteMetricFilterOutput,
        aws_smithy_http::result::SdkError<crate::error::DeleteMetricFilterError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DeleteMetricFilterInputOperationOutputAlias,
            crate::output::DeleteMetricFilterOutput,
            crate::error::DeleteMetricFilterError,
            crate::input::DeleteMetricFilterInputOperationRetryAlias,
        >,
    {
        // Any failure to assemble or serialize the request is reported as a
        // `ConstructionFailure` before anything goes over the wire.
        let input = match self.inner.build() {
            Ok(input) => input,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The name of the log group.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.log_group_name(inp),
        }
    }
    /// <p>The name of the log group.</p>
    pub fn set_log_group_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_log_group_name(input),
        }
    }
    /// <p>The name of the metric filter.</p>
    pub fn filter_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.filter_name(inp),
        }
    }
    /// <p>The name of the metric filter.</p>
    pub fn set_filter_name(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_filter_name(input),
        }
    }
}
/// Fluent builder constructing a request to `DeleteQueryDefinition`.
///
/// <p>Deletes a saved CloudWatch Logs Insights query definition.
/// A query definition contains details about a saved CloudWatch Logs Insights query.</p>
/// <p>Each <code>DeleteQueryDefinition</code> operation can delete one query definition.</p>
/// <p>You must have the <code>logs:DeleteQueryDefinition</code> permission to be able to perform
/// this operation.</p>
#[derive(std::fmt::Debug)]
pub struct DeleteQueryDefinition<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state: `send()` reads `handle.conf` and dispatches via `handle.client`.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Request parameters accumulated by the fluent setter methods.
    inner: crate::input::delete_query_definition_input::Builder,
}
impl<C, M, R> DeleteQueryDefinition<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DeleteQueryDefinition`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::delete_query_definition_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeleteQueryDefinitionOutput,
        aws_smithy_http::result::SdkError<crate::error::DeleteQueryDefinitionError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DeleteQueryDefinitionInputOperationOutputAlias,
            crate::output::DeleteQueryDefinitionOutput,
            crate::error::DeleteQueryDefinitionError,
            crate::input::DeleteQueryDefinitionInputOperationRetryAlias,
        >,
    {
        // Any failure to assemble or serialize the request is reported as a
        // `ConstructionFailure` before anything goes over the wire.
        let input = match self.inner.build() {
            Ok(input) => input,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The ID of the query definition that you want to delete. You can use <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueryDefinitions.html">DescribeQueryDefinitions</a> to retrieve the
    /// IDs of your saved query definitions.</p>
    pub fn query_definition_id(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.query_definition_id(inp),
        }
    }
    /// <p>The ID of the query definition that you want to delete. You can use <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueryDefinitions.html">DescribeQueryDefinitions</a> to retrieve the
    /// IDs of your saved query definitions.</p>
    pub fn set_query_definition_id(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_query_definition_id(input),
        }
    }
}
/// Fluent builder constructing a request to `DeleteResourcePolicy`.
///
/// <p>Deletes a resource policy from this account. This revokes
/// the access of the identities in that policy to put log events to this account.</p>
#[derive(std::fmt::Debug)]
pub struct DeleteResourcePolicy<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state: `send()` reads `handle.conf` and dispatches via `handle.client`.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Request parameters accumulated by the fluent setter methods.
    inner: crate::input::delete_resource_policy_input::Builder,
}
impl<C, M, R> DeleteResourcePolicy<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DeleteResourcePolicy`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::delete_resource_policy_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeleteResourcePolicyOutput,
        aws_smithy_http::result::SdkError<crate::error::DeleteResourcePolicyError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DeleteResourcePolicyInputOperationOutputAlias,
            crate::output::DeleteResourcePolicyOutput,
            crate::error::DeleteResourcePolicyError,
            crate::input::DeleteResourcePolicyInputOperationRetryAlias,
        >,
    {
        // Any failure to assemble or serialize the request is reported as a
        // `ConstructionFailure` before anything goes over the wire.
        let input = match self.inner.build() {
            Ok(input) => input,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The name of the policy to be revoked. This parameter is required.</p>
    pub fn policy_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.policy_name(inp),
        }
    }
    /// <p>The name of the policy to be revoked. This parameter is required.</p>
    pub fn set_policy_name(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_policy_name(input),
        }
    }
}
/// Fluent builder constructing a request to `DeleteRetentionPolicy`.
///
/// <p>Deletes the specified retention policy.</p>
/// <p>Log events do not expire if they belong to log groups without a retention policy.</p>
#[derive(std::fmt::Debug)]
pub struct DeleteRetentionPolicy<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state: `send()` reads `handle.conf` and dispatches via `handle.client`.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Request parameters accumulated by the fluent setter methods.
    inner: crate::input::delete_retention_policy_input::Builder,
}
impl<C, M, R> DeleteRetentionPolicy<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DeleteRetentionPolicy`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::delete_retention_policy_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeleteRetentionPolicyOutput,
        aws_smithy_http::result::SdkError<crate::error::DeleteRetentionPolicyError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DeleteRetentionPolicyInputOperationOutputAlias,
            crate::output::DeleteRetentionPolicyOutput,
            crate::error::DeleteRetentionPolicyError,
            crate::input::DeleteRetentionPolicyInputOperationRetryAlias,
        >,
    {
        // Any failure to assemble or serialize the request is reported as a
        // `ConstructionFailure` before anything goes over the wire.
        let input = match self.inner.build() {
            Ok(input) => input,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The name of the log group.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.log_group_name(inp),
        }
    }
    /// <p>The name of the log group.</p>
    pub fn set_log_group_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_log_group_name(input),
        }
    }
}
/// Fluent builder constructing a request to `DeleteSubscriptionFilter`.
///
/// <p>Deletes the specified subscription filter.</p>
#[derive(std::fmt::Debug)]
pub struct DeleteSubscriptionFilter<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state: `send()` reads `handle.conf` and dispatches via `handle.client`.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Request parameters accumulated by the fluent setter methods.
    inner: crate::input::delete_subscription_filter_input::Builder,
}
impl<C, M, R> DeleteSubscriptionFilter<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DeleteSubscriptionFilter`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteSubscriptionFilterOutput,
aws_smithy_http::result::SdkError<crate::error::DeleteSubscriptionFilterError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DeleteSubscriptionFilterInputOperationOutputAlias,
crate::output::DeleteSubscriptionFilterOutput,
crate::error::DeleteSubscriptionFilterError,
crate::input::DeleteSubscriptionFilterInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the log group.</p>
pub fn log_group_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.log_group_name(inp);
self
}
/// <p>The name of the log group.</p>
pub fn set_log_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_log_group_name(input);
self
}
/// <p>The name of the subscription filter.</p>
pub fn filter_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.filter_name(inp);
self
}
/// <p>The name of the subscription filter.</p>
pub fn set_filter_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_filter_name(input);
self
}
}
/// Fluent builder constructing a request to `DescribeDestinations`.
///
/// <p>Lists all your destinations. The results are ASCII-sorted by destination name.</p>
#[derive(std::fmt::Debug)]
pub struct DescribeDestinations<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::describe_destinations_input::Builder,
}
impl<C, M, R> DescribeDestinations<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DescribeDestinations`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::describe_destinations_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeDestinationsOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeDestinationsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeDestinationsInputOperationOutputAlias,
            crate::output::DescribeDestinationsOutput,
            crate::error::DescribeDestinationsError,
            crate::input::DescribeDestinationsInputOperationRetryAlias,
        >,
    {
        // Builder errors never reach the wire; report them as construction failures.
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        // Assemble the operation against the shared client configuration.
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(operation) => operation,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The prefix to match. If you don't specify a value, no prefix filter is applied.</p>
    pub fn destination_name_prefix(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.destination_name_prefix(inp),
            handle: self.handle,
        }
    }
    /// <p>The prefix to match. If you don't specify a value, no prefix filter is applied.</p>
    pub fn set_destination_name_prefix(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_destination_name_prefix(input),
            handle: self.handle,
        }
    }
    /// <p>The token for the next set of items to return. (You received this token from a previous call.)</p>
    pub fn next_token(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.next_token(inp),
            handle: self.handle,
        }
    }
    /// <p>The token for the next set of items to return. (You received this token from a previous call.)</p>
    pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_next_token(input),
            handle: self.handle,
        }
    }
    /// <p>The maximum number of items returned. If you don't specify a value, the default is up to 50 items.</p>
    pub fn limit(self, inp: i32) -> Self {
        Self {
            inner: self.inner.limit(inp),
            handle: self.handle,
        }
    }
    /// <p>The maximum number of items returned. If you don't specify a value, the default is up to 50 items.</p>
    pub fn set_limit(self, input: std::option::Option<i32>) -> Self {
        Self {
            inner: self.inner.set_limit(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `DescribeExportTasks`.
///
/// <p>Lists the specified export tasks. You can list all your export tasks or filter
/// the results based on task ID or task status.</p>
#[derive(std::fmt::Debug)]
pub struct DescribeExportTasks<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::describe_export_tasks_input::Builder,
}
impl<C, M, R> DescribeExportTasks<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DescribeExportTasks`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::describe_export_tasks_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeExportTasksOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeExportTasksError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeExportTasksInputOperationOutputAlias,
            crate::output::DescribeExportTasksOutput,
            crate::error::DescribeExportTasksError,
            crate::input::DescribeExportTasksInputOperationRetryAlias,
        >,
    {
        // Builder errors never reach the wire; report them as construction failures.
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        // Assemble the operation against the shared client configuration.
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(operation) => operation,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The ID of the export task. Specifying a task ID filters the results to zero or one export tasks.</p>
    pub fn task_id(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.task_id(inp),
            handle: self.handle,
        }
    }
    /// <p>The ID of the export task. Specifying a task ID filters the results to zero or one export tasks.</p>
    pub fn set_task_id(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_task_id(input),
            handle: self.handle,
        }
    }
    /// <p>The status code of the export task. Specifying a status code filters the results to zero or more export tasks.</p>
    pub fn status_code(self, inp: crate::model::ExportTaskStatusCode) -> Self {
        Self {
            inner: self.inner.status_code(inp),
            handle: self.handle,
        }
    }
    /// <p>The status code of the export task. Specifying a status code filters the results to zero or more export tasks.</p>
    pub fn set_status_code(
        self,
        input: std::option::Option<crate::model::ExportTaskStatusCode>,
    ) -> Self {
        Self {
            inner: self.inner.set_status_code(input),
            handle: self.handle,
        }
    }
    /// <p>The token for the next set of items to return. (You received this token from a previous call.)</p>
    pub fn next_token(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.next_token(inp),
            handle: self.handle,
        }
    }
    /// <p>The token for the next set of items to return. (You received this token from a previous call.)</p>
    pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_next_token(input),
            handle: self.handle,
        }
    }
    /// <p>The maximum number of items returned. If you don't specify a value, the default is up to 50 items.</p>
    pub fn limit(self, inp: i32) -> Self {
        Self {
            inner: self.inner.limit(inp),
            handle: self.handle,
        }
    }
    /// <p>The maximum number of items returned. If you don't specify a value, the default is up to 50 items.</p>
    pub fn set_limit(self, input: std::option::Option<i32>) -> Self {
        Self {
            inner: self.inner.set_limit(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `DescribeLogGroups`.
///
/// <p>Lists the specified log groups. You can list all your log groups or filter the results by prefix.
/// The results are ASCII-sorted by log group name.</p>
/// <p>CloudWatch Logs doesn’t support IAM policies that control access to the <code>DescribeLogGroups</code> action by using the
/// <code>aws:ResourceTag/<i>key-name</i>
/// </code> condition key. Other CloudWatch Logs actions
/// do support the use of the <code>aws:ResourceTag/<i>key-name</i>
/// </code> condition key to control access.
/// For more information about using tags to control access, see
/// <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html">Controlling access to Amazon Web Services resources using tags</a>.</p>
#[derive(std::fmt::Debug)]
pub struct DescribeLogGroups<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::describe_log_groups_input::Builder,
}
impl<C, M, R> DescribeLogGroups<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DescribeLogGroups`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::describe_log_groups_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeLogGroupsOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeLogGroupsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeLogGroupsInputOperationOutputAlias,
            crate::output::DescribeLogGroupsOutput,
            crate::error::DescribeLogGroupsError,
            crate::input::DescribeLogGroupsInputOperationRetryAlias,
        >,
    {
        // Builder errors never reach the wire; report them as construction failures.
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        // Assemble the operation against the shared client configuration.
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(operation) => operation,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The prefix to match.</p>
    pub fn log_group_name_prefix(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.log_group_name_prefix(inp),
            handle: self.handle,
        }
    }
    /// <p>The prefix to match.</p>
    pub fn set_log_group_name_prefix(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_log_group_name_prefix(input),
            handle: self.handle,
        }
    }
    /// <p>The token for the next set of items to return. (You received this token from a previous call.)</p>
    pub fn next_token(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.next_token(inp),
            handle: self.handle,
        }
    }
    /// <p>The token for the next set of items to return. (You received this token from a previous call.)</p>
    pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_next_token(input),
            handle: self.handle,
        }
    }
    /// <p>The maximum number of items returned. If you don't specify a value, the default is up to 50 items.</p>
    pub fn limit(self, inp: i32) -> Self {
        Self {
            inner: self.inner.limit(inp),
            handle: self.handle,
        }
    }
    /// <p>The maximum number of items returned. If you don't specify a value, the default is up to 50 items.</p>
    pub fn set_limit(self, input: std::option::Option<i32>) -> Self {
        Self {
            inner: self.inner.set_limit(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `DescribeLogStreams`.
///
/// <p>Lists the log streams for the specified log group.
/// You can list all the log streams or filter the results by prefix.
/// You can also control how the results are ordered.</p>
/// <p>This operation has a limit of five transactions per second, after which transactions are throttled.</p>
#[derive(std::fmt::Debug)]
pub struct DescribeLogStreams<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::describe_log_streams_input::Builder,
}
impl<C, M, R> DescribeLogStreams<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DescribeLogStreams`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::describe_log_streams_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeLogStreamsOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeLogStreamsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeLogStreamsInputOperationOutputAlias,
            crate::output::DescribeLogStreamsOutput,
            crate::error::DescribeLogStreamsError,
            crate::input::DescribeLogStreamsInputOperationRetryAlias,
        >,
    {
        // Builder errors never reach the wire; report them as construction failures.
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        // Assemble the operation against the shared client configuration.
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(operation) => operation,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The name of the log group.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.log_group_name(inp),
            handle: self.handle,
        }
    }
    /// <p>The name of the log group.</p>
    pub fn set_log_group_name(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_log_group_name(input),
            handle: self.handle,
        }
    }
    /// <p>The prefix to match.</p>
    /// <p>If <code>orderBy</code> is <code>LastEventTime</code>, you cannot specify this
    /// parameter.</p>
    pub fn log_stream_name_prefix(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.log_stream_name_prefix(inp),
            handle: self.handle,
        }
    }
    /// <p>The prefix to match.</p>
    /// <p>If <code>orderBy</code> is <code>LastEventTime</code>, you cannot specify this
    /// parameter.</p>
    pub fn set_log_stream_name_prefix(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_log_stream_name_prefix(input),
            handle: self.handle,
        }
    }
    /// <p>If the value is <code>LogStreamName</code>, the results are ordered by log stream name.
    /// If the value is <code>LastEventTime</code>, the results are ordered by the event time.
    /// The default value is <code>LogStreamName</code>.</p>
    /// <p>If you order the results by event time, you cannot specify the <code>logStreamNamePrefix</code> parameter.</p>
    /// <p>
    /// <code>lastEventTimestamp</code> represents the time of the most recent log event in the
    /// log stream in CloudWatch Logs. This number is expressed as the number of milliseconds after
    /// Jan 1, 1970 00:00:00 UTC. <code>lastEventTimestamp</code> updates on an eventual consistency
    /// basis. It typically updates in less than an hour from ingestion, but in rare situations might
    /// take longer.</p>
    pub fn order_by(self, inp: crate::model::OrderBy) -> Self {
        Self {
            inner: self.inner.order_by(inp),
            handle: self.handle,
        }
    }
    /// <p>If the value is <code>LogStreamName</code>, the results are ordered by log stream name.
    /// If the value is <code>LastEventTime</code>, the results are ordered by the event time.
    /// The default value is <code>LogStreamName</code>.</p>
    /// <p>If you order the results by event time, you cannot specify the <code>logStreamNamePrefix</code> parameter.</p>
    /// <p>
    /// <code>lastEventTimestamp</code> represents the time of the most recent log event in the
    /// log stream in CloudWatch Logs. This number is expressed as the number of milliseconds after
    /// Jan 1, 1970 00:00:00 UTC. <code>lastEventTimestamp</code> updates on an eventual consistency
    /// basis. It typically updates in less than an hour from ingestion, but in rare situations might
    /// take longer.</p>
    pub fn set_order_by(self, input: std::option::Option<crate::model::OrderBy>) -> Self {
        Self {
            inner: self.inner.set_order_by(input),
            handle: self.handle,
        }
    }
    /// <p>If the value is true, results are returned in descending order.
    /// If the value is to false, results are returned in ascending order.
    /// The default value is false.</p>
    pub fn descending(self, inp: bool) -> Self {
        Self {
            inner: self.inner.descending(inp),
            handle: self.handle,
        }
    }
    /// <p>If the value is true, results are returned in descending order.
    /// If the value is to false, results are returned in ascending order.
    /// The default value is false.</p>
    pub fn set_descending(self, input: std::option::Option<bool>) -> Self {
        Self {
            inner: self.inner.set_descending(input),
            handle: self.handle,
        }
    }
    /// <p>The token for the next set of items to return. (You received this token from a previous call.)</p>
    pub fn next_token(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.next_token(inp),
            handle: self.handle,
        }
    }
    /// <p>The token for the next set of items to return. (You received this token from a previous call.)</p>
    pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_next_token(input),
            handle: self.handle,
        }
    }
    /// <p>The maximum number of items returned. If you don't specify a value, the default is up to 50 items.</p>
    pub fn limit(self, inp: i32) -> Self {
        Self {
            inner: self.inner.limit(inp),
            handle: self.handle,
        }
    }
    /// <p>The maximum number of items returned. If you don't specify a value, the default is up to 50 items.</p>
    pub fn set_limit(self, input: std::option::Option<i32>) -> Self {
        Self {
            inner: self.inner.set_limit(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `DescribeMetricFilters`.
///
/// <p>Lists the specified metric filters. You can list all of the metric filters or filter
/// the results by log name, prefix, metric name, or metric namespace. The results are
/// ASCII-sorted by filter name.</p>
#[derive(std::fmt::Debug)]
pub struct DescribeMetricFilters<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::describe_metric_filters_input::Builder,
}
impl<C, M, R> DescribeMetricFilters<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DescribeMetricFilters`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::describe_metric_filters_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeMetricFiltersOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeMetricFiltersError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeMetricFiltersInputOperationOutputAlias,
            crate::output::DescribeMetricFiltersOutput,
            crate::error::DescribeMetricFiltersError,
            crate::input::DescribeMetricFiltersInputOperationRetryAlias,
        >,
    {
        // Builder errors never reach the wire; report them as construction failures.
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        // Assemble the operation against the shared client configuration.
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(operation) => operation,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The name of the log group.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.log_group_name(inp),
            handle: self.handle,
        }
    }
    /// <p>The name of the log group.</p>
    pub fn set_log_group_name(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_log_group_name(input),
            handle: self.handle,
        }
    }
    /// <p>The prefix to match. CloudWatch Logs uses the value you set here
    /// only if you also include the <code>logGroupName</code> parameter in your request.</p>
    pub fn filter_name_prefix(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.filter_name_prefix(inp),
            handle: self.handle,
        }
    }
    /// <p>The prefix to match. CloudWatch Logs uses the value you set here
    /// only if you also include the <code>logGroupName</code> parameter in your request.</p>
    pub fn set_filter_name_prefix(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_filter_name_prefix(input),
            handle: self.handle,
        }
    }
    /// <p>The token for the next set of items to return. (You received this token from a previous call.)</p>
    pub fn next_token(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.next_token(inp),
            handle: self.handle,
        }
    }
    /// <p>The token for the next set of items to return. (You received this token from a previous call.)</p>
    pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_next_token(input),
            handle: self.handle,
        }
    }
    /// <p>The maximum number of items returned. If you don't specify a value, the default is up to 50 items.</p>
    pub fn limit(self, inp: i32) -> Self {
        Self {
            inner: self.inner.limit(inp),
            handle: self.handle,
        }
    }
    /// <p>The maximum number of items returned. If you don't specify a value, the default is up to 50 items.</p>
    pub fn set_limit(self, input: std::option::Option<i32>) -> Self {
        Self {
            inner: self.inner.set_limit(input),
            handle: self.handle,
        }
    }
    /// <p>Filters results to include only those with the specified metric name. If you include this parameter in your request, you
    /// must also include the <code>metricNamespace</code> parameter.</p>
    pub fn metric_name(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.metric_name(inp),
            handle: self.handle,
        }
    }
    /// <p>Filters results to include only those with the specified metric name. If you include this parameter in your request, you
    /// must also include the <code>metricNamespace</code> parameter.</p>
    pub fn set_metric_name(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_metric_name(input),
            handle: self.handle,
        }
    }
    /// <p>Filters results to include only those in the specified namespace. If you include this parameter in your request, you
    /// must also include the <code>metricName</code> parameter.</p>
    pub fn metric_namespace(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.metric_namespace(inp),
            handle: self.handle,
        }
    }
    /// <p>Filters results to include only those in the specified namespace. If you include this parameter in your request, you
    /// must also include the <code>metricName</code> parameter.</p>
    pub fn set_metric_namespace(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_metric_namespace(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `DescribeQueries`.
///
/// <p>Returns a list of CloudWatch Logs Insights queries that are scheduled, executing, or have
/// been executed recently in this account. You can request all queries or limit it to queries of
/// a specific log group or queries with a certain status.</p>
#[derive(std::fmt::Debug)]
pub struct DescribeQueries<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::describe_queries_input::Builder,
}
impl<C, M, R> DescribeQueries<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DescribeQueries`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::describe_queries_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeQueriesOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeQueriesError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeQueriesInputOperationOutputAlias,
            crate::output::DescribeQueriesOutput,
            crate::error::DescribeQueriesError,
            crate::input::DescribeQueriesInputOperationRetryAlias,
        >,
    {
        // Builder errors never reach the wire; report them as construction failures.
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        // Assemble the operation against the shared client configuration.
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(operation) => operation,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>Limits the returned queries to only those for the specified log group.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.log_group_name(inp),
            handle: self.handle,
        }
    }
    /// <p>Limits the returned queries to only those for the specified log group.</p>
    pub fn set_log_group_name(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_log_group_name(input),
            handle: self.handle,
        }
    }
    /// <p>Limits the returned queries to only those that have the specified status. Valid values are <code>Cancelled</code>,
    /// <code>Complete</code>, <code>Failed</code>, <code>Running</code>, and <code>Scheduled</code>.</p>
    pub fn status(self, inp: crate::model::QueryStatus) -> Self {
        Self {
            inner: self.inner.status(inp),
            handle: self.handle,
        }
    }
    /// <p>Limits the returned queries to only those that have the specified status. Valid values are <code>Cancelled</code>,
    /// <code>Complete</code>, <code>Failed</code>, <code>Running</code>, and <code>Scheduled</code>.</p>
    pub fn set_status(self, input: std::option::Option<crate::model::QueryStatus>) -> Self {
        Self {
            inner: self.inner.set_status(input),
            handle: self.handle,
        }
    }
    /// <p>Limits the number of returned queries to the specified number.</p>
    pub fn max_results(self, inp: i32) -> Self {
        Self {
            inner: self.inner.max_results(inp),
            handle: self.handle,
        }
    }
    /// <p>Limits the number of returned queries to the specified number.</p>
    pub fn set_max_results(self, input: std::option::Option<i32>) -> Self {
        Self {
            inner: self.inner.set_max_results(input),
            handle: self.handle,
        }
    }
    /// <p>The token for the next set of items to return. The token expires after 24 hours.</p>
    pub fn next_token(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.next_token(inp),
            handle: self.handle,
        }
    }
    /// <p>The token for the next set of items to return. The token expires after 24 hours.</p>
    pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_next_token(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `DescribeQueryDefinitions`.
///
/// <p>This operation returns a paginated list of your saved CloudWatch Logs Insights query definitions.</p>
/// <p>You can use the <code>queryDefinitionNamePrefix</code> parameter to limit the results to only the
/// query definitions that have names that start with a certain string.</p>
#[derive(std::fmt::Debug)]
pub struct DescribeQueryDefinitions<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::describe_query_definitions_input::Builder,
}
impl<C, M, R> DescribeQueryDefinitions<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DescribeQueryDefinitions`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::describe_query_definitions_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeQueryDefinitionsOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeQueryDefinitionsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeQueryDefinitionsInputOperationOutputAlias,
            crate::output::DescribeQueryDefinitionsOutput,
            crate::error::DescribeQueryDefinitionsError,
            crate::input::DescribeQueryDefinitionsInputOperationRetryAlias,
        >,
    {
        // Builder errors never reach the wire; report them as construction failures.
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        // Assemble the operation against the shared client configuration.
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(operation) => operation,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>Use this parameter to filter your results to only the query definitions that have names that start with the prefix you specify.</p>
    pub fn query_definition_name_prefix(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.query_definition_name_prefix(inp),
            handle: self.handle,
        }
    }
    /// <p>Use this parameter to filter your results to only the query definitions that have names that start with the prefix you specify.</p>
    pub fn set_query_definition_name_prefix(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_query_definition_name_prefix(input),
            handle: self.handle,
        }
    }
    /// <p>Limits the number of returned query definitions to the specified number.</p>
    pub fn max_results(self, inp: i32) -> Self {
        Self {
            inner: self.inner.max_results(inp),
            handle: self.handle,
        }
    }
    /// <p>Limits the number of returned query definitions to the specified number.</p>
    pub fn set_max_results(self, input: std::option::Option<i32>) -> Self {
        Self {
            inner: self.inner.set_max_results(input),
            handle: self.handle,
        }
    }
    /// <p>The token for the next set of items to return. The token expires after 24 hours.</p>
    pub fn next_token(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.next_token(inp),
            handle: self.handle,
        }
    }
    /// <p>The token for the next set of items to return. The token expires after 24 hours.</p>
    pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_next_token(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `DescribeResourcePolicies`.
///
/// <p>Lists the resource policies in this account.</p>
#[derive(std::fmt::Debug)]
pub struct DescribeResourcePolicies<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state; `handle.conf` is read at `send` time and
    // `handle.client` performs the dispatch.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request fields via the fluent setters until `send` builds the input.
    inner: crate::input::describe_resource_policies_input::Builder,
}
impl<C, M, R> DescribeResourcePolicies<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DescribeResourcePolicies`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeResourcePoliciesOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeResourcePoliciesError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeResourcePoliciesInputOperationOutputAlias,
            crate::output::DescribeResourcePoliciesOutput,
            crate::error::DescribeResourcePoliciesError,
            crate::input::DescribeResourcePoliciesInputOperationRetryAlias,
        >,
    {
        // Builder failures become construction errors, not service errors.
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The token for the next set of items to return. The token expires after 24 hours.</p>
    pub fn next_token(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.next_token(inp),
        }
    }
    /// <p>The token for the next set of items to return. The token expires after 24 hours.</p>
    pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_next_token(input),
        }
    }
    /// <p>The maximum number of resource policies to be displayed with one call of this API.</p>
    pub fn limit(self, inp: i32) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.limit(inp),
        }
    }
    /// <p>The maximum number of resource policies to be displayed with one call of this API.</p>
    pub fn set_limit(self, input: std::option::Option<i32>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_limit(input),
        }
    }
}
/// Fluent builder constructing a request to `DescribeSubscriptionFilters`.
///
/// <p>Lists the subscription filters for the specified log group. You can list all the subscription filters or filter the results by prefix.
/// The results are ASCII-sorted by filter name.</p>
#[derive(std::fmt::Debug)]
pub struct DescribeSubscriptionFilters<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state; `handle.conf` is read at `send` time and
    // `handle.client` performs the dispatch.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request fields via the fluent setters until `send` builds the input.
    inner: crate::input::describe_subscription_filters_input::Builder,
}
impl<C, M, R> DescribeSubscriptionFilters<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DescribeSubscriptionFilters`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeSubscriptionFiltersOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeSubscriptionFiltersError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeSubscriptionFiltersInputOperationOutputAlias,
            crate::output::DescribeSubscriptionFiltersOutput,
            crate::error::DescribeSubscriptionFiltersError,
            crate::input::DescribeSubscriptionFiltersInputOperationRetryAlias,
        >,
    {
        // Builder failures become construction errors, not service errors.
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The name of the log group.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.log_group_name(inp),
        }
    }
    /// <p>The name of the log group.</p>
    pub fn set_log_group_name(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_log_group_name(input),
        }
    }
    /// <p>The prefix to match. If you don't specify a value, no prefix filter is applied.</p>
    pub fn filter_name_prefix(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.filter_name_prefix(inp),
        }
    }
    /// <p>The prefix to match. If you don't specify a value, no prefix filter is applied.</p>
    pub fn set_filter_name_prefix(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_filter_name_prefix(input),
        }
    }
    /// <p>The token for the next set of items to return. (You received this token from a previous call.)</p>
    pub fn next_token(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.next_token(inp),
        }
    }
    /// <p>The token for the next set of items to return. (You received this token from a previous call.)</p>
    pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_next_token(input),
        }
    }
    /// <p>The maximum number of items returned. If you don't specify a value, the default is up to 50 items.</p>
    pub fn limit(self, inp: i32) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.limit(inp),
        }
    }
    /// <p>The maximum number of items returned. If you don't specify a value, the default is up to 50 items.</p>
    pub fn set_limit(self, input: std::option::Option<i32>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_limit(input),
        }
    }
}
/// Fluent builder constructing a request to `DisassociateKmsKey`.
///
/// <p>Disassociates the associated Key Management Service customer master key (CMK) from the specified log group.</p>
/// <p>After the KMS CMK is disassociated from the log group, CloudWatch Logs stops encrypting newly ingested data for the log group.
/// All previously ingested data remains encrypted, and CloudWatch Logs requires permissions for the CMK whenever the encrypted data is requested.</p>
/// <p>Note that it can take up to 5 minutes for this operation to take effect.</p>
#[derive(std::fmt::Debug)]
pub struct DisassociateKmsKey<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state; `handle.conf` is read at `send` time and
    // `handle.client` performs the dispatch.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request fields via the fluent setters until `send` builds the input.
    inner: crate::input::disassociate_kms_key_input::Builder,
}
impl<C, M, R> DisassociateKmsKey<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DisassociateKmsKey`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DisassociateKmsKeyOutput,
        aws_smithy_http::result::SdkError<crate::error::DisassociateKmsKeyError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DisassociateKmsKeyInputOperationOutputAlias,
            crate::output::DisassociateKmsKeyOutput,
            crate::error::DisassociateKmsKeyError,
            crate::input::DisassociateKmsKeyInputOperationRetryAlias,
        >,
    {
        // Builder failures become construction errors, not service errors.
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The name of the log group.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.log_group_name(inp),
        }
    }
    /// <p>The name of the log group.</p>
    pub fn set_log_group_name(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_log_group_name(input),
        }
    }
}
/// Fluent builder constructing a request to `FilterLogEvents`.
///
/// <p>Lists log events from the specified log group. You can list all the log events or filter the results
/// using a filter pattern, a time range, and the name of the log stream.</p>
/// <p>By default, this operation returns as many log events as can fit in 1 MB (up to 10,000
/// log events) or all the events found within the time range that you specify. If the results
/// include a token, then there are more log events available, and you can get additional results
/// by specifying the token in a subsequent call. This operation can return empty results
/// while there are more log events available through the token.</p>
/// <p>The returned log events are sorted by event timestamp, the timestamp when the event was ingested
/// by CloudWatch Logs, and the ID of the <code>PutLogEvents</code> request.</p>
#[derive(std::fmt::Debug)]
pub struct FilterLogEvents<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state; `handle.conf` is read at `send` time and
    // `handle.client` performs the dispatch.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request fields via the fluent setters until `send` builds the input.
    inner: crate::input::filter_log_events_input::Builder,
}
impl<C, M, R> FilterLogEvents<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `FilterLogEvents`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::FilterLogEventsOutput,
        aws_smithy_http::result::SdkError<crate::error::FilterLogEventsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::FilterLogEventsInputOperationOutputAlias,
            crate::output::FilterLogEventsOutput,
            crate::error::FilterLogEventsError,
            crate::input::FilterLogEventsInputOperationRetryAlias,
        >,
    {
        // Builder failures become construction errors, not service errors.
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The name of the log group to search.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.log_group_name(inp),
        }
    }
    /// <p>The name of the log group to search.</p>
    pub fn set_log_group_name(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_log_group_name(input),
        }
    }
    /// Appends an item to `logStreamNames`.
    ///
    /// To override the contents of this collection use [`set_log_stream_names`](Self::set_log_stream_names).
    ///
    /// <p>Filters the results to only logs from the log streams in this list.</p>
    /// <p>If you specify a value for both <code>logStreamNamePrefix</code> and <code>logStreamNames</code>, the action
    /// returns an <code>InvalidParameterException</code> error.</p>
    pub fn log_stream_names(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.log_stream_names(inp),
        }
    }
    /// <p>Filters the results to only logs from the log streams in this list.</p>
    /// <p>If you specify a value for both <code>logStreamNamePrefix</code> and <code>logStreamNames</code>, the action
    /// returns an <code>InvalidParameterException</code> error.</p>
    pub fn set_log_stream_names(
        self,
        input: std::option::Option<std::vec::Vec<std::string::String>>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_log_stream_names(input),
        }
    }
    /// <p>Filters the results to include only events from log streams that have names starting with this prefix.</p>
    /// <p>If you specify a value for both <code>logStreamNamePrefix</code> and <code>logStreamNames</code>, but the value for
    /// <code>logStreamNamePrefix</code> does not match any log stream names specified in <code>logStreamNames</code>, the action
    /// returns an <code>InvalidParameterException</code> error.</p>
    pub fn log_stream_name_prefix(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.log_stream_name_prefix(inp),
        }
    }
    /// <p>Filters the results to include only events from log streams that have names starting with this prefix.</p>
    /// <p>If you specify a value for both <code>logStreamNamePrefix</code> and <code>logStreamNames</code>, but the value for
    /// <code>logStreamNamePrefix</code> does not match any log stream names specified in <code>logStreamNames</code>, the action
    /// returns an <code>InvalidParameterException</code> error.</p>
    pub fn set_log_stream_name_prefix(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_log_stream_name_prefix(input),
        }
    }
    /// <p>The start of the time range, expressed as the number of milliseconds after Jan 1, 1970
    /// 00:00:00 UTC. Events with a timestamp before this time are not returned.</p>
    pub fn start_time(self, inp: i64) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.start_time(inp),
        }
    }
    /// <p>The start of the time range, expressed as the number of milliseconds after Jan 1, 1970
    /// 00:00:00 UTC. Events with a timestamp before this time are not returned.</p>
    pub fn set_start_time(self, input: std::option::Option<i64>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_start_time(input),
        }
    }
    /// <p>The end of the time range, expressed as the number of milliseconds after Jan 1, 1970
    /// 00:00:00 UTC. Events with a timestamp later than this time are not returned.</p>
    pub fn end_time(self, inp: i64) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.end_time(inp),
        }
    }
    /// <p>The end of the time range, expressed as the number of milliseconds after Jan 1, 1970
    /// 00:00:00 UTC. Events with a timestamp later than this time are not returned.</p>
    pub fn set_end_time(self, input: std::option::Option<i64>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_end_time(input),
        }
    }
    /// <p>The filter pattern to use. For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html">Filter and Pattern Syntax</a>.</p>
    /// <p>If not provided, all the events are matched.</p>
    pub fn filter_pattern(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.filter_pattern(inp),
        }
    }
    /// <p>The filter pattern to use. For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html">Filter and Pattern Syntax</a>.</p>
    /// <p>If not provided, all the events are matched.</p>
    pub fn set_filter_pattern(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_filter_pattern(input),
        }
    }
    /// <p>The token for the next set of events to return. (You received this token from a previous call.)</p>
    pub fn next_token(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.next_token(inp),
        }
    }
    /// <p>The token for the next set of events to return. (You received this token from a previous call.)</p>
    pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_next_token(input),
        }
    }
    /// <p>The maximum number of events to return. The default is 10,000 events.</p>
    pub fn limit(self, inp: i32) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.limit(inp),
        }
    }
    /// <p>The maximum number of events to return. The default is 10,000 events.</p>
    pub fn set_limit(self, input: std::option::Option<i32>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_limit(input),
        }
    }
    /// <p>If the value is true, the operation makes a best effort to provide responses that
    /// contain events from multiple log streams within the log group, interleaved in a single
    /// response. If the value is false, all the matched log events in the first log stream are
    /// searched first, then those in the next log stream, and so on. The default is false.</p>
    /// <p>
    /// <b>Important:</b> Starting on June 17, 2019, this parameter
    /// is ignored and the value is assumed to be true. The response from this operation always
    /// interleaves events from multiple log streams within a log group.</p>
    pub fn interleaved(self, inp: bool) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.interleaved(inp),
        }
    }
    /// <p>If the value is true, the operation makes a best effort to provide responses that
    /// contain events from multiple log streams within the log group, interleaved in a single
    /// response. If the value is false, all the matched log events in the first log stream are
    /// searched first, then those in the next log stream, and so on. The default is false.</p>
    /// <p>
    /// <b>Important:</b> Starting on June 17, 2019, this parameter
    /// is ignored and the value is assumed to be true. The response from this operation always
    /// interleaves events from multiple log streams within a log group.</p>
    pub fn set_interleaved(self, input: std::option::Option<bool>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_interleaved(input),
        }
    }
}
/// Fluent builder constructing a request to `GetLogEvents`.
///
/// <p>Lists log events from the specified log stream. You can list all of the log events or
/// filter using a time range.</p>
///
/// <p>By default, this operation returns as many log events as can fit in a response size of 1MB (up to 10,000 log events).
/// You can get additional log events by specifying one of the tokens in a subsequent call.
/// This operation can return empty results while there are more log events available through the token.</p>
#[derive(std::fmt::Debug)]
pub struct GetLogEvents<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state; `handle.conf` is read at `send` time and
    // `handle.client` performs the dispatch.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request fields via the fluent setters until `send` builds the input.
    inner: crate::input::get_log_events_input::Builder,
}
impl<C, M, R> GetLogEvents<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `GetLogEvents`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetLogEventsOutput,
        aws_smithy_http::result::SdkError<crate::error::GetLogEventsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::GetLogEventsInputOperationOutputAlias,
            crate::output::GetLogEventsOutput,
            crate::error::GetLogEventsError,
            crate::input::GetLogEventsInputOperationRetryAlias,
        >,
    {
        // Builder failures become construction errors, not service errors.
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The name of the log group.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.log_group_name(inp),
        }
    }
    /// <p>The name of the log group.</p>
    pub fn set_log_group_name(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_log_group_name(input),
        }
    }
    /// <p>The name of the log stream.</p>
    pub fn log_stream_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.log_stream_name(inp),
        }
    }
    /// <p>The name of the log stream.</p>
    pub fn set_log_stream_name(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_log_stream_name(input),
        }
    }
    /// <p>The start of the time range, expressed as the number of milliseconds after Jan 1, 1970
    /// 00:00:00 UTC. Events with a timestamp equal to this time or later than this time are included.
    /// Events with a timestamp earlier than this time are not included.</p>
    pub fn start_time(self, inp: i64) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.start_time(inp),
        }
    }
    /// <p>The start of the time range, expressed as the number of milliseconds after Jan 1, 1970
    /// 00:00:00 UTC. Events with a timestamp equal to this time or later than this time are included.
    /// Events with a timestamp earlier than this time are not included.</p>
    pub fn set_start_time(self, input: std::option::Option<i64>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_start_time(input),
        }
    }
    /// <p>The end of the time range, expressed as the number of milliseconds after Jan 1, 1970
    /// 00:00:00 UTC. Events with a timestamp equal to or later than this time are not
    /// included.</p>
    pub fn end_time(self, inp: i64) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.end_time(inp),
        }
    }
    /// <p>The end of the time range, expressed as the number of milliseconds after Jan 1, 1970
    /// 00:00:00 UTC. Events with a timestamp equal to or later than this time are not
    /// included.</p>
    pub fn set_end_time(self, input: std::option::Option<i64>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_end_time(input),
        }
    }
    /// <p>The token for the next set of items to return. (You received this token from a previous call.)</p>
    pub fn next_token(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.next_token(inp),
        }
    }
    /// <p>The token for the next set of items to return. (You received this token from a previous call.)</p>
    pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_next_token(input),
        }
    }
    /// <p>The maximum number of log events returned. If you don't specify a value, the maximum is
    /// as many log events as can fit in a response size of 1 MB, up to 10,000 log events.</p>
    pub fn limit(self, inp: i32) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.limit(inp),
        }
    }
    /// <p>The maximum number of log events returned. If you don't specify a value, the maximum is
    /// as many log events as can fit in a response size of 1 MB, up to 10,000 log events.</p>
    pub fn set_limit(self, input: std::option::Option<i32>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_limit(input),
        }
    }
    /// <p>If the value is true, the earliest log events are returned first.
    /// If the value is false, the latest log events are returned first.
    /// The default value is false.</p>
    /// <p>If you are using a previous <code>nextForwardToken</code> value as the <code>nextToken</code> in this operation,
    /// you must specify <code>true</code> for <code>startFromHead</code>.</p>
    pub fn start_from_head(self, inp: bool) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.start_from_head(inp),
        }
    }
    /// <p>If the value is true, the earliest log events are returned first.
    /// If the value is false, the latest log events are returned first.
    /// The default value is false.</p>
    /// <p>If you are using a previous <code>nextForwardToken</code> value as the <code>nextToken</code> in this operation,
    /// you must specify <code>true</code> for <code>startFromHead</code>.</p>
    pub fn set_start_from_head(self, input: std::option::Option<bool>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_start_from_head(input),
        }
    }
}
/// Fluent builder constructing a request to `GetLogGroupFields`.
///
/// <p>Returns a list of the fields that are included in log events in the specified log group, along with the percentage of log events
/// that contain each field. The search is limited to a time period that you specify.</p>
/// <p>In the results, fields that start with @ are fields generated by CloudWatch Logs. For
/// example, <code>@timestamp</code> is the timestamp of each log event. For more information about the fields that are
/// generated by CloudWatch logs, see
/// <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_AnalyzeLogData-discoverable-fields.html">Supported Logs and Discovered Fields</a>.</p>
/// <p>The response results are sorted by the frequency percentage, starting
/// with the highest percentage.</p>
#[derive(std::fmt::Debug)]
pub struct GetLogGroupFields<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state; `handle.conf` is read at `send` time and
    // `handle.client` performs the dispatch.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request fields via the fluent setters until `send` builds the input.
    inner: crate::input::get_log_group_fields_input::Builder,
}
impl<C, M, R> GetLogGroupFields<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `GetLogGroupFields`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetLogGroupFieldsOutput,
        aws_smithy_http::result::SdkError<crate::error::GetLogGroupFieldsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::GetLogGroupFieldsInputOperationOutputAlias,
            crate::output::GetLogGroupFieldsOutput,
            crate::error::GetLogGroupFieldsError,
            crate::input::GetLogGroupFieldsInputOperationRetryAlias,
        >,
    {
        // Builder failures become construction errors, not service errors.
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The name of the log group to search.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.log_group_name(inp),
        }
    }
    /// <p>The name of the log group to search.</p>
    pub fn set_log_group_name(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_log_group_name(input),
        }
    }
    /// <p>The time to set as the center of the query. If you specify <code>time</code>, the 15 minutes
    /// before this time are queries. If you omit <code>time</code> the 8
    /// minutes before and 8 minutes after this time are searched.</p>
    /// <p>The <code>time</code> value is specified as epoch time, the number of seconds since
    /// January 1, 1970, 00:00:00 UTC.</p>
    pub fn time(self, inp: i64) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.time(inp),
        }
    }
    /// <p>The time to set as the center of the query. If you specify <code>time</code>, the 15 minutes
    /// before this time are queries. If you omit <code>time</code> the 8
    /// minutes before and 8 minutes after this time are searched.</p>
    /// <p>The <code>time</code> value is specified as epoch time, the number of seconds since
    /// January 1, 1970, 00:00:00 UTC.</p>
    pub fn set_time(self, input: std::option::Option<i64>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_time(input),
        }
    }
}
/// Fluent builder constructing a request to `GetLogRecord`.
///
/// <p>Retrieves all of the fields and values of a single log event. All fields are retrieved,
/// even if the original query that produced the <code>logRecordPointer</code> retrieved only a
/// subset of fields. Fields are returned as field name/field value pairs.</p>
/// <p>The full unparsed log event is returned within <code>@message</code>.</p>
#[derive(std::fmt::Debug)]
pub struct GetLogRecord<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state; `handle.conf` is read at `send` time and
    // `handle.client` performs the dispatch.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request fields via the fluent setters until `send` builds the input.
    inner: crate::input::get_log_record_input::Builder,
}
impl<C, M, R> GetLogRecord<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `GetLogRecord`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetLogRecordOutput,
        aws_smithy_http::result::SdkError<crate::error::GetLogRecordError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::GetLogRecordInputOperationOutputAlias,
            crate::output::GetLogRecordOutput,
            crate::error::GetLogRecordError,
            crate::input::GetLogRecordInputOperationRetryAlias,
        >,
    {
        // Builder failures become construction errors, not service errors.
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        let op = match input.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(e) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    e.into(),
                ))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The pointer corresponding to the log event record you want to retrieve. You get this from
    /// the response of a <code>GetQueryResults</code> operation. In that response, the value of the
    /// <code>@ptr</code> field for a log event is the value to use as <code>logRecordPointer</code>
    /// to retrieve that complete log event record.</p>
    pub fn log_record_pointer(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.log_record_pointer(inp),
        }
    }
    /// <p>The pointer corresponding to the log event record you want to retrieve. You get this from
    /// the response of a <code>GetQueryResults</code> operation. In that response, the value of the
    /// <code>@ptr</code> field for a log event is the value to use as <code>logRecordPointer</code>
    /// to retrieve that complete log event record.</p>
    pub fn set_log_record_pointer(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_log_record_pointer(input),
        }
    }
}
/// Fluent builder constructing a request to `GetQueryResults`.
///
/// <p>Returns the results from the specified query.</p>
/// <p>Only the fields requested in the query are returned, along with a <code>@ptr</code>
/// field, which is the identifier for the log record. You can use the value of <code>@ptr</code>
/// in a <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogRecord.html">GetLogRecord</a>
/// operation to get the full log record.</p>
/// <p>
/// <code>GetQueryResults</code>
/// does not start a query execution. To run a query, use <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html">StartQuery</a>.</p>
/// <p>If the value of the <code>Status</code> field in the output is <code>Running</code>, this operation
/// returns only partial results. If you see a value of <code>Scheduled</code> or <code>Running</code> for the status,
/// you can retry the operation later to see the final results. </p>
#[derive(std::fmt::Debug)]
pub struct GetQueryResults<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state (configuration plus the underlying client used by `send`).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates request parameters supplied through the fluent setters.
    inner: crate::input::get_query_results_input::Builder,
}
impl<C, M, R> GetQueryResults<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a `GetQueryResults` fluent builder backed by the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Failures are reported as an `SdkError` carrying enough detail to be
    /// matched against specific error variants.
    ///
    /// Retryable failures are retried twice by default; this behavior can be
    /// tuned through the [RetryConfig](aws_smithy_types::retry::RetryConfig)
    /// supplied when the client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetQueryResultsOutput,
        aws_smithy_http::result::SdkError<crate::error::GetQueryResultsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::GetQueryResultsInputOperationOutputAlias,
            crate::output::GetQueryResultsOutput,
            crate::error::GetQueryResultsError,
            crate::input::GetQueryResultsInputOperationRetryAlias,
        >,
    {
        // Build the input and assemble the operation in one chain; any
        // assembly problem surfaces as a `ConstructionFailure`.
        let operation = self
            .inner
            .build()
            .map_err(|source| {
                aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
            })?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|source| {
                aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
            })?;
        self.handle.client.call(operation).await
    }
    /// <p>The ID number of the query.</p>
    pub fn query_id(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.query_id(inp),
            handle: self.handle,
        }
    }
    /// <p>The ID number of the query.</p>
    pub fn set_query_id(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_query_id(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `ListTagsLogGroup`.
///
/// <p>Lists the tags for the specified log group.</p>
#[derive(std::fmt::Debug)]
pub struct ListTagsLogGroup<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state (configuration plus the underlying client used by `send`).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates request parameters supplied through the fluent setters.
    inner: crate::input::list_tags_log_group_input::Builder,
}
impl<C, M, R> ListTagsLogGroup<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a `ListTagsLogGroup` fluent builder backed by the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Failures are reported as an `SdkError` carrying enough detail to be
    /// matched against specific error variants.
    ///
    /// Retryable failures are retried twice by default; this behavior can be
    /// tuned through the [RetryConfig](aws_smithy_types::retry::RetryConfig)
    /// supplied when the client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListTagsLogGroupOutput,
        aws_smithy_http::result::SdkError<crate::error::ListTagsLogGroupError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::ListTagsLogGroupInputOperationOutputAlias,
            crate::output::ListTagsLogGroupOutput,
            crate::error::ListTagsLogGroupError,
            crate::input::ListTagsLogGroupInputOperationRetryAlias,
        >,
    {
        // Build the input and assemble the operation in one chain; any
        // assembly problem surfaces as a `ConstructionFailure`.
        let operation = self
            .inner
            .build()
            .map_err(|source| {
                aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
            })?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|source| {
                aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
            })?;
        self.handle.client.call(operation).await
    }
    /// <p>The name of the log group.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.log_group_name(inp),
            handle: self.handle,
        }
    }
    /// <p>The name of the log group.</p>
    pub fn set_log_group_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_log_group_name(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `PutDestination`.
///
/// <p>Creates or updates a destination. This operation is used only to create destinations for cross-account subscriptions.</p>
/// <p>A destination encapsulates a physical resource (such
/// as an Amazon Kinesis stream) and enables you to subscribe to a real-time stream of log events
/// for a different account, ingested using <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html">PutLogEvents</a>.</p>
/// <p>Through an access policy, a destination controls what is written to it.
/// By default, <code>PutDestination</code> does not set any access policy with the destination,
/// which means a cross-account user cannot call <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutSubscriptionFilter.html">PutSubscriptionFilter</a> against
/// this destination. To enable this, the destination owner must call <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestinationPolicy.html">PutDestinationPolicy</a> after <code>PutDestination</code>.</p>
/// <p>To perform a <code>PutDestination</code> operation, you must also have the
/// <code>iam:PassRole</code> permission.</p>
#[derive(std::fmt::Debug)]
pub struct PutDestination<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state (configuration plus the underlying client used by `send`).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates request parameters supplied through the fluent setters.
    inner: crate::input::put_destination_input::Builder,
}
impl<C, M, R> PutDestination<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a `PutDestination` fluent builder backed by the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Failures are reported as an `SdkError` carrying enough detail to be
    /// matched against specific error variants.
    ///
    /// Retryable failures are retried twice by default; this behavior can be
    /// tuned through the [RetryConfig](aws_smithy_types::retry::RetryConfig)
    /// supplied when the client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::PutDestinationOutput,
        aws_smithy_http::result::SdkError<crate::error::PutDestinationError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::PutDestinationInputOperationOutputAlias,
            crate::output::PutDestinationOutput,
            crate::error::PutDestinationError,
            crate::input::PutDestinationInputOperationRetryAlias,
        >,
    {
        // Build the input and assemble the operation in one chain; any
        // assembly problem surfaces as a `ConstructionFailure`.
        let operation = self
            .inner
            .build()
            .map_err(|source| {
                aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
            })?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|source| {
                aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
            })?;
        self.handle.client.call(operation).await
    }
    /// <p>A name for the destination.</p>
    pub fn destination_name(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.destination_name(inp),
            handle: self.handle,
        }
    }
    /// <p>A name for the destination.</p>
    pub fn set_destination_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_destination_name(input),
            handle: self.handle,
        }
    }
    /// <p>The ARN of an Amazon Kinesis stream to which to deliver matching log events.</p>
    pub fn target_arn(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.target_arn(inp),
            handle: self.handle,
        }
    }
    /// <p>The ARN of an Amazon Kinesis stream to which to deliver matching log events.</p>
    pub fn set_target_arn(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_target_arn(input),
            handle: self.handle,
        }
    }
    /// <p>The ARN of an IAM role that grants CloudWatch Logs permissions to call the Amazon
    /// Kinesis <code>PutRecord</code> operation on the destination stream.</p>
    pub fn role_arn(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.role_arn(inp),
            handle: self.handle,
        }
    }
    /// <p>The ARN of an IAM role that grants CloudWatch Logs permissions to call the Amazon
    /// Kinesis <code>PutRecord</code> operation on the destination stream.</p>
    pub fn set_role_arn(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_role_arn(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `PutDestinationPolicy`.
///
/// <p>Creates or updates an access policy associated with an existing
/// destination. An access policy is an <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/policies_overview.html">IAM policy document</a> that is used
/// to authorize claims to register a subscription filter against a given destination.</p>
/// <p>If multiple Amazon Web Services accounts are sending logs to this destination, each sender account must be
/// listed separately in the policy. The policy does not support specifying <code>*</code>
/// as the Principal or the use of the <code>aws:PrincipalOrgId</code> global key.</p>
#[derive(std::fmt::Debug)]
pub struct PutDestinationPolicy<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state (configuration plus the underlying client used by `send`).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates request parameters supplied through the fluent setters.
    inner: crate::input::put_destination_policy_input::Builder,
}
impl<C, M, R> PutDestinationPolicy<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a `PutDestinationPolicy` fluent builder backed by the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Failures are reported as an `SdkError` carrying enough detail to be
    /// matched against specific error variants.
    ///
    /// Retryable failures are retried twice by default; this behavior can be
    /// tuned through the [RetryConfig](aws_smithy_types::retry::RetryConfig)
    /// supplied when the client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::PutDestinationPolicyOutput,
        aws_smithy_http::result::SdkError<crate::error::PutDestinationPolicyError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::PutDestinationPolicyInputOperationOutputAlias,
            crate::output::PutDestinationPolicyOutput,
            crate::error::PutDestinationPolicyError,
            crate::input::PutDestinationPolicyInputOperationRetryAlias,
        >,
    {
        // Build the input and assemble the operation in one chain; any
        // assembly problem surfaces as a `ConstructionFailure`.
        let operation = self
            .inner
            .build()
            .map_err(|source| {
                aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
            })?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|source| {
                aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
            })?;
        self.handle.client.call(operation).await
    }
    /// <p>A name for an existing destination.</p>
    pub fn destination_name(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.destination_name(inp),
            handle: self.handle,
        }
    }
    /// <p>A name for an existing destination.</p>
    pub fn set_destination_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_destination_name(input),
            handle: self.handle,
        }
    }
    /// <p>An IAM policy document that authorizes cross-account users to deliver their log events
    /// to the associated destination. This can be up to 5120 bytes.</p>
    pub fn access_policy(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.access_policy(inp),
            handle: self.handle,
        }
    }
    /// <p>An IAM policy document that authorizes cross-account users to deliver their log events
    /// to the associated destination. This can be up to 5120 bytes.</p>
    pub fn set_access_policy(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_access_policy(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `PutLogEvents`.
///
/// <p>Uploads a batch of log events to the specified log stream.</p>
/// <p>You must include the sequence token obtained from the response of the previous call. An
/// upload in a newly created log stream does not require a sequence token. You can also get the
/// sequence token in the <code>expectedSequenceToken</code> field from
/// <code>InvalidSequenceTokenException</code>. If you call <code>PutLogEvents</code> twice
/// within a narrow time period using the same value for <code>sequenceToken</code>, both calls
/// might be successful or one might be rejected.</p>
/// <p>The batch of events must satisfy the following constraints:</p>
/// <ul>
/// <li>
/// <p>The maximum batch size is 1,048,576 bytes. This size is calculated as the sum of
/// all event messages in UTF-8, plus 26 bytes for each log event.</p>
/// </li>
/// <li>
/// <p>None of the log events in the batch can be more than 2 hours in the future.</p>
/// </li>
/// <li>
/// <p>None of the log events in the batch can be older than 14 days or older than the retention
/// period of the log group.</p>
/// </li>
/// <li>
/// <p>The log events in the batch must be in chronological order by their timestamp. The
/// timestamp is the time the event occurred, expressed as the number of milliseconds after
/// Jan 1, 1970 00:00:00 UTC. (In Amazon Web Services Tools for PowerShell and the Amazon Web Services SDK for .NET, the
/// timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. For example,
/// 2017-09-15T13:45:30.) </p>
/// </li>
/// <li>
/// <p>A batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails.</p>
/// </li>
/// <li>
/// <p>The maximum number of log events in a batch is 10,000.</p>
/// </li>
/// <li>
/// <p>There is a quota of 5 requests per second per log stream. Additional requests are throttled. This quota can't be changed.</p>
/// </li>
/// </ul>
/// <p>If a call to <code>PutLogEvents</code> returns "UnrecognizedClientException" the most likely cause is an invalid Amazon Web Services access key ID or secret key. </p>
#[derive(std::fmt::Debug)]
pub struct PutLogEvents<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state (configuration plus the underlying client used by `send`).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates request parameters supplied through the fluent setters.
    inner: crate::input::put_log_events_input::Builder,
}
impl<C, M, R> PutLogEvents<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a `PutLogEvents` fluent builder backed by the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Failures are reported as an `SdkError` carrying enough detail to be
    /// matched against specific error variants.
    ///
    /// Retryable failures are retried twice by default; this behavior can be
    /// tuned through the [RetryConfig](aws_smithy_types::retry::RetryConfig)
    /// supplied when the client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::PutLogEventsOutput,
        aws_smithy_http::result::SdkError<crate::error::PutLogEventsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::PutLogEventsInputOperationOutputAlias,
            crate::output::PutLogEventsOutput,
            crate::error::PutLogEventsError,
            crate::input::PutLogEventsInputOperationRetryAlias,
        >,
    {
        // Build the input and assemble the operation in one chain; any
        // assembly problem surfaces as a `ConstructionFailure`.
        let operation = self
            .inner
            .build()
            .map_err(|source| {
                aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
            })?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|source| {
                aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
            })?;
        self.handle.client.call(operation).await
    }
    /// <p>The name of the log group.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.log_group_name(inp),
            handle: self.handle,
        }
    }
    /// <p>The name of the log group.</p>
    pub fn set_log_group_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_log_group_name(input),
            handle: self.handle,
        }
    }
    /// <p>The name of the log stream.</p>
    pub fn log_stream_name(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.log_stream_name(inp),
            handle: self.handle,
        }
    }
    /// <p>The name of the log stream.</p>
    pub fn set_log_stream_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_log_stream_name(input),
            handle: self.handle,
        }
    }
    /// Appends an item to `logEvents`.
    ///
    /// To override the contents of this collection use [`set_log_events`](Self::set_log_events).
    ///
    /// <p>The log events.</p>
    pub fn log_events(self, inp: impl Into<crate::model::InputLogEvent>) -> Self {
        Self {
            inner: self.inner.log_events(inp),
            handle: self.handle,
        }
    }
    /// <p>The log events.</p>
    pub fn set_log_events(
        self,
        input: std::option::Option<std::vec::Vec<crate::model::InputLogEvent>>,
    ) -> Self {
        Self {
            inner: self.inner.set_log_events(input),
            handle: self.handle,
        }
    }
    /// <p>The sequence token obtained from the response of the previous <code>PutLogEvents</code>
    /// call. An upload in a newly created log stream does not require a sequence token. You can also
    /// get the sequence token using <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeLogStreams.html">DescribeLogStreams</a>. If you call <code>PutLogEvents</code> twice within a narrow
    /// time period using the same value for <code>sequenceToken</code>, both calls might be
    /// successful or one might be rejected.</p>
    pub fn sequence_token(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.sequence_token(inp),
            handle: self.handle,
        }
    }
    /// <p>The sequence token obtained from the response of the previous <code>PutLogEvents</code>
    /// call. An upload in a newly created log stream does not require a sequence token. You can also
    /// get the sequence token using <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeLogStreams.html">DescribeLogStreams</a>. If you call <code>PutLogEvents</code> twice within a narrow
    /// time period using the same value for <code>sequenceToken</code>, both calls might be
    /// successful or one might be rejected.</p>
    pub fn set_sequence_token(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_sequence_token(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `PutMetricFilter`.
///
/// <p>Creates or updates a metric filter and associates it with the specified log group.
/// Metric filters allow you to configure rules to extract metric data from log events ingested
/// through <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html">PutLogEvents</a>.</p>
/// <p>The maximum number of metric filters that can be associated with a log group is
/// 100.</p>
/// <p>When you create a metric filter, you can also optionally assign a unit and dimensions
/// to the metric that is created.</p>
/// <important>
/// <p>Metrics extracted from log events are charged as custom metrics.
/// To prevent unexpected high charges, do not specify high-cardinality fields such as
/// <code>IPAddress</code> or <code>requestID</code> as dimensions. Each different value
/// found for
/// a dimension is treated as a separate metric and accrues charges as a separate custom metric.
/// </p>
/// <p>To help prevent accidental high charges, Amazon disables a metric filter
/// if it generates 1000 different name/value pairs for the dimensions that you
/// have specified within a certain amount of time.</p>
/// <p>You can also set up a billing alarm to alert you if your charges are higher than
/// expected. For more information,
/// see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/monitor_estimated_charges_with_cloudwatch.html">
/// Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges</a>.
/// </p>
/// </important>
#[derive(std::fmt::Debug)]
pub struct PutMetricFilter<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state (configuration plus the underlying client used by `send`).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates request parameters supplied through the fluent setters.
    inner: crate::input::put_metric_filter_input::Builder,
}
impl<C, M, R> PutMetricFilter<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a `PutMetricFilter` fluent builder backed by the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Failures are reported as an `SdkError` carrying enough detail to be
    /// matched against specific error variants.
    ///
    /// Retryable failures are retried twice by default; this behavior can be
    /// tuned through the [RetryConfig](aws_smithy_types::retry::RetryConfig)
    /// supplied when the client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::PutMetricFilterOutput,
        aws_smithy_http::result::SdkError<crate::error::PutMetricFilterError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::PutMetricFilterInputOperationOutputAlias,
            crate::output::PutMetricFilterOutput,
            crate::error::PutMetricFilterError,
            crate::input::PutMetricFilterInputOperationRetryAlias,
        >,
    {
        // Build the input and assemble the operation in one chain; any
        // assembly problem surfaces as a `ConstructionFailure`.
        let operation = self
            .inner
            .build()
            .map_err(|source| {
                aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
            })?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|source| {
                aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
            })?;
        self.handle.client.call(operation).await
    }
    /// <p>The name of the log group.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.log_group_name(inp),
            handle: self.handle,
        }
    }
    /// <p>The name of the log group.</p>
    pub fn set_log_group_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_log_group_name(input),
            handle: self.handle,
        }
    }
    /// <p>A name for the metric filter.</p>
    pub fn filter_name(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.filter_name(inp),
            handle: self.handle,
        }
    }
    /// <p>A name for the metric filter.</p>
    pub fn set_filter_name(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_filter_name(input),
            handle: self.handle,
        }
    }
    /// <p>A filter pattern for extracting metric data out of ingested log events.</p>
    pub fn filter_pattern(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.filter_pattern(inp),
            handle: self.handle,
        }
    }
    /// <p>A filter pattern for extracting metric data out of ingested log events.</p>
    pub fn set_filter_pattern(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_filter_pattern(input),
            handle: self.handle,
        }
    }
    /// Appends an item to `metricTransformations`.
    ///
    /// To override the contents of this collection use [`set_metric_transformations`](Self::set_metric_transformations).
    ///
    /// <p>A collection of information that defines how metric data gets emitted.</p>
    pub fn metric_transformations(
        self,
        inp: impl Into<crate::model::MetricTransformation>,
    ) -> Self {
        Self {
            inner: self.inner.metric_transformations(inp),
            handle: self.handle,
        }
    }
    /// <p>A collection of information that defines how metric data gets emitted.</p>
    pub fn set_metric_transformations(
        self,
        input: std::option::Option<std::vec::Vec<crate::model::MetricTransformation>>,
    ) -> Self {
        Self {
            inner: self.inner.set_metric_transformations(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `PutQueryDefinition`.
///
/// <p>Creates or updates a query definition for CloudWatch Logs Insights. For
/// more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AnalyzingLogData.html">Analyzing Log Data with CloudWatch Logs Insights</a>.</p>
///
/// <p>To update a query definition, specify its
/// <code>queryDefinitionId</code> in your request. The values of <code>name</code>, <code>queryString</code>,
/// and <code>logGroupNames</code> are changed to the values that you specify in your update
/// operation. No current values are retained from the current query definition. For example, if
/// you update a current query definition that includes log groups, and you don't specify the
/// <code>logGroupNames</code> parameter in your update operation, the query definition changes
/// to contain no log groups.</p>
/// <p>You must have the <code>logs:PutQueryDefinition</code> permission to be able to perform
/// this operation.</p>
#[derive(std::fmt::Debug)]
pub struct PutQueryDefinition<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state (configuration plus the underlying client used by `send`).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates request parameters supplied through the fluent setters.
    inner: crate::input::put_query_definition_input::Builder,
}
impl<C, M, R> PutQueryDefinition<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a `PutQueryDefinition` fluent builder backed by the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Failures are reported as an `SdkError` carrying enough detail to be
    /// matched against specific error variants.
    ///
    /// Retryable failures are retried twice by default; this behavior can be
    /// tuned through the [RetryConfig](aws_smithy_types::retry::RetryConfig)
    /// supplied when the client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::PutQueryDefinitionOutput,
        aws_smithy_http::result::SdkError<crate::error::PutQueryDefinitionError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::PutQueryDefinitionInputOperationOutputAlias,
            crate::output::PutQueryDefinitionOutput,
            crate::error::PutQueryDefinitionError,
            crate::input::PutQueryDefinitionInputOperationRetryAlias,
        >,
    {
        // Build the input and assemble the operation in one chain; any
        // assembly problem surfaces as a `ConstructionFailure`.
        let operation = self
            .inner
            .build()
            .map_err(|source| {
                aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
            })?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|source| {
                aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
            })?;
        self.handle.client.call(operation).await
    }
    /// <p>A name for the query definition. If you are saving a lot of query definitions, we
    /// recommend that you name them so that you can easily find the ones you want by using the first
    /// part of the name as a filter in the <code>queryDefinitionNamePrefix</code> parameter of <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueryDefinitions.html">DescribeQueryDefinitions</a>.</p>
    pub fn name(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.name(inp),
            handle: self.handle,
        }
    }
    /// <p>A name for the query definition. If you are saving a lot of query definitions, we
    /// recommend that you name them so that you can easily find the ones you want by using the first
    /// part of the name as a filter in the <code>queryDefinitionNamePrefix</code> parameter of <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueryDefinitions.html">DescribeQueryDefinitions</a>.</p>
    pub fn set_name(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_name(input),
            handle: self.handle,
        }
    }
    /// <p>If you are updating a query definition, use this parameter to specify the ID of the query
    /// definition that you want to update. You can use <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueryDefinitions.html">DescribeQueryDefinitions</a> to retrieve the IDs of your saved query
    /// definitions.</p>
    /// <p>If you are creating a query definition, do not specify this parameter. CloudWatch
    /// generates a unique ID for the new query definition and include it in the response to this
    /// operation.</p>
    pub fn query_definition_id(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.query_definition_id(inp),
            handle: self.handle,
        }
    }
    /// <p>If you are updating a query definition, use this parameter to specify the ID of the query
    /// definition that you want to update. You can use <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueryDefinitions.html">DescribeQueryDefinitions</a> to retrieve the IDs of your saved query
    /// definitions.</p>
    /// <p>If you are creating a query definition, do not specify this parameter. CloudWatch
    /// generates a unique ID for the new query definition and include it in the response to this
    /// operation.</p>
    pub fn set_query_definition_id(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_query_definition_id(input),
            handle: self.handle,
        }
    }
    /// Appends an item to `logGroupNames`.
    ///
    /// To override the contents of this collection use [`set_log_group_names`](Self::set_log_group_names).
    ///
    /// <p>Use this parameter to include specific log groups as part of your query definition.</p>
    /// <p>If you are updating a query definition and you omit this parameter, then the updated
    /// definition will contain no log groups.</p>
    pub fn log_group_names(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.log_group_names(inp),
            handle: self.handle,
        }
    }
    /// <p>Use this parameter to include specific log groups as part of your query definition.</p>
    /// <p>If you are updating a query definition and you omit this parameter, then the updated
    /// definition will contain no log groups.</p>
    pub fn set_log_group_names(
        self,
        input: std::option::Option<std::vec::Vec<std::string::String>>,
    ) -> Self {
        Self {
            inner: self.inner.set_log_group_names(input),
            handle: self.handle,
        }
    }
    /// <p>The query string to use for this definition.
    /// For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html">CloudWatch Logs Insights Query Syntax</a>.</p>
    pub fn query_string(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.query_string(inp),
            handle: self.handle,
        }
    }
    /// <p>The query string to use for this definition.
    /// For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html">CloudWatch Logs Insights Query Syntax</a>.</p>
    pub fn set_query_string(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_query_string(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `PutResourcePolicy`.
///
/// <p>Creates or updates a resource policy allowing other Amazon Web Services services to put log events to
/// this account, such as Amazon Route 53. An account can have up to 10 resource policies per Amazon Web Services
/// Region.</p>
#[derive(std::fmt::Debug)]
pub struct PutResourcePolicy<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state (connector, middleware, retry policy, and config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates request parameters until `send` builds the operation input.
    inner: crate::input::put_resource_policy_input::Builder,
}
impl<C, M, R> PutResourcePolicy<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `PutResourcePolicy` fluent builder with empty input.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::PutResourcePolicyOutput,
        aws_smithy_http::result::SdkError<crate::error::PutResourcePolicyError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::PutResourcePolicyInputOperationOutputAlias,
            crate::output::PutResourcePolicyOutput,
            crate::error::PutResourcePolicyError,
            crate::input::PutResourcePolicyInputOperationRetryAlias,
        >,
    {
        // Build the input, then turn it into a dispatchable operation; failures in
        // either step surface as `ConstructionFailure` before any request is sent.
        let op = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>Name of the new policy. This parameter is required.</p>
    pub fn policy_name(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.policy_name(inp),
            handle: self.handle,
        }
    }
    /// <p>Name of the new policy. This parameter is required.</p>
    pub fn set_policy_name(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_policy_name(input),
            handle: self.handle,
        }
    }
    /// <p>Details of the new policy, including the identity of the principal that is enabled to put logs to this account. This is formatted as a JSON string.
    /// This parameter is required.</p>
    /// <p>For example, to enable the Route 53 service to put DNS query logs into a log group,
    /// allow the <code>route53.amazonaws.com</code> service principal the
    /// <code>logs:PutLogEvents</code> action on your CloudWatch Logs resource ARN (a log group or
    /// log stream).</p>
    /// <p>CloudWatch Logs also supports the <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-sourcearn">aws:SourceArn</a>
    /// and <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-sourceaccount">aws:SourceAccount</a>
    /// condition context keys: set <code>SourceArn</code> to the resource making the call from
    /// Route 53 to CloudWatch Logs, and <code>SourceAccount</code> to the Amazon Web Services
    /// account ID making that call.</p>
    pub fn policy_document(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.policy_document(inp),
            handle: self.handle,
        }
    }
    /// <p>Details of the new policy, including the identity of the principal that is enabled to put logs to this account. This is formatted as a JSON string.
    /// This parameter is required.</p>
    /// <p>For example, to enable the Route 53 service to put DNS query logs into a log group,
    /// allow the <code>route53.amazonaws.com</code> service principal the
    /// <code>logs:PutLogEvents</code> action on your CloudWatch Logs resource ARN (a log group or
    /// log stream).</p>
    /// <p>CloudWatch Logs also supports the <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-sourcearn">aws:SourceArn</a>
    /// and <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-sourceaccount">aws:SourceAccount</a>
    /// condition context keys: set <code>SourceArn</code> to the resource making the call from
    /// Route 53 to CloudWatch Logs, and <code>SourceAccount</code> to the Amazon Web Services
    /// account ID making that call.</p>
    pub fn set_policy_document(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_policy_document(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `PutRetentionPolicy`.
///
/// <p>Sets the retention of the specified log group. A retention policy allows you to
/// configure the number of days for which to retain log events in the specified log
/// group.</p>
#[derive(std::fmt::Debug)]
pub struct PutRetentionPolicy<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state (connector, middleware, retry policy, and config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates request parameters until `send` builds the operation input.
    inner: crate::input::put_retention_policy_input::Builder,
}
impl<C, M, R> PutRetentionPolicy<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `PutRetentionPolicy` fluent builder with empty input.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::PutRetentionPolicyOutput,
        aws_smithy_http::result::SdkError<crate::error::PutRetentionPolicyError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::PutRetentionPolicyInputOperationOutputAlias,
            crate::output::PutRetentionPolicyOutput,
            crate::error::PutRetentionPolicyError,
            crate::input::PutRetentionPolicyInputOperationRetryAlias,
        >,
    {
        // Build the input, then turn it into a dispatchable operation; failures in
        // either step surface as `ConstructionFailure` before any request is sent.
        let op = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The name of the log group.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.log_group_name(inp),
            handle: self.handle,
        }
    }
    /// <p>The name of the log group.</p>
    pub fn set_log_group_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_log_group_name(input),
            handle: self.handle,
        }
    }
    /// <p>The number of days to retain the log events in the specified log group.
    /// Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, and 3653.</p>
    /// <p>To set a log group to never have log events expire, use
    /// <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DeleteRetentionPolicy.html">DeleteRetentionPolicy</a>.
    /// </p>
    pub fn retention_in_days(self, inp: i32) -> Self {
        Self {
            inner: self.inner.retention_in_days(inp),
            handle: self.handle,
        }
    }
    /// <p>The number of days to retain the log events in the specified log group.
    /// Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, and 3653.</p>
    /// <p>To set a log group to never have log events expire, use
    /// <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DeleteRetentionPolicy.html">DeleteRetentionPolicy</a>.
    /// </p>
    pub fn set_retention_in_days(self, input: std::option::Option<i32>) -> Self {
        Self {
            inner: self.inner.set_retention_in_days(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `PutSubscriptionFilter`.
///
/// <p>Creates or updates a subscription filter and associates it with the specified log
/// group. Subscription filters allow you to subscribe to a real-time stream of log events
/// ingested through <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html">PutLogEvents</a> and have them delivered to a specific
/// destination. When log events are sent to the
/// receiving service, they are Base64 encoded
/// and compressed with the gzip format.</p>
/// <p>The following destinations are supported for subscription filters:</p>
/// <ul>
/// <li>
/// <p>An Amazon Kinesis stream belonging to the same account as the subscription filter,
/// for same-account delivery.</p>
/// </li>
/// <li>
/// <p>A logical destination that belongs to a different account, for cross-account delivery.</p>
/// </li>
/// <li>
/// <p>An Amazon Kinesis Firehose delivery stream that belongs to the same account as the
/// subscription filter, for same-account delivery.</p>
/// </li>
/// <li>
/// <p>An Lambda function that belongs to the same account as the subscription filter,
/// for same-account delivery.</p>
/// </li>
/// </ul>
/// <p>Each log group can have up to two subscription filters associated with it. If you are
/// updating an existing filter, you must specify the correct name in <code>filterName</code>.
/// </p>
/// <p>To perform a <code>PutSubscriptionFilter</code> operation, you must also have the
/// <code>iam:PassRole</code> permission.</p>
#[derive(std::fmt::Debug)]
pub struct PutSubscriptionFilter<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state (connector, middleware, retry policy, and config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates request parameters until `send` builds the operation input.
    inner: crate::input::put_subscription_filter_input::Builder,
}
impl<C, M, R> PutSubscriptionFilter<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `PutSubscriptionFilter` fluent builder with empty input.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::PutSubscriptionFilterOutput,
        aws_smithy_http::result::SdkError<crate::error::PutSubscriptionFilterError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::PutSubscriptionFilterInputOperationOutputAlias,
            crate::output::PutSubscriptionFilterOutput,
            crate::error::PutSubscriptionFilterError,
            crate::input::PutSubscriptionFilterInputOperationRetryAlias,
        >,
    {
        // Build the input, then turn it into a dispatchable operation; failures in
        // either step surface as `ConstructionFailure` before any request is sent.
        let op = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The name of the log group.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.log_group_name(inp),
            handle: self.handle,
        }
    }
    /// <p>The name of the log group.</p>
    pub fn set_log_group_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_log_group_name(input),
            handle: self.handle,
        }
    }
    /// <p>A name for the subscription filter. If you are updating an existing filter, you must
    /// specify the correct name in <code>filterName</code>. To find the name of the filter currently
    /// associated with a log group, use <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeSubscriptionFilters.html">DescribeSubscriptionFilters</a>.</p>
    pub fn filter_name(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.filter_name(inp),
            handle: self.handle,
        }
    }
    /// <p>A name for the subscription filter. If you are updating an existing filter, you must
    /// specify the correct name in <code>filterName</code>. To find the name of the filter currently
    /// associated with a log group, use <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeSubscriptionFilters.html">DescribeSubscriptionFilters</a>.</p>
    pub fn set_filter_name(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_filter_name(input),
            handle: self.handle,
        }
    }
    /// <p>A filter pattern for subscribing to a filtered stream of log events.</p>
    pub fn filter_pattern(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.filter_pattern(inp),
            handle: self.handle,
        }
    }
    /// <p>A filter pattern for subscribing to a filtered stream of log events.</p>
    pub fn set_filter_pattern(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_filter_pattern(input),
            handle: self.handle,
        }
    }
    /// <p>The ARN of the destination to deliver matching log events to. The supported
    /// destinations are: an Amazon Kinesis stream belonging to the same account as the
    /// subscription filter; a logical destination (specified using an ARN) belonging to a
    /// different account, for cross-account delivery; an Amazon Kinesis Firehose delivery
    /// stream belonging to the same account; or a Lambda function belonging to the same
    /// account.</p>
    /// <p>If you are setting up a cross-account subscription, the destination must have an
    /// IAM policy associated with it that allows the sender to send logs to the destination.
    /// For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestinationPolicy.html">PutDestinationPolicy</a>.</p>
    pub fn destination_arn(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.destination_arn(inp),
            handle: self.handle,
        }
    }
    /// <p>The ARN of the destination to deliver matching log events to. The supported
    /// destinations are: an Amazon Kinesis stream belonging to the same account as the
    /// subscription filter; a logical destination (specified using an ARN) belonging to a
    /// different account, for cross-account delivery; an Amazon Kinesis Firehose delivery
    /// stream belonging to the same account; or a Lambda function belonging to the same
    /// account.</p>
    /// <p>If you are setting up a cross-account subscription, the destination must have an
    /// IAM policy associated with it that allows the sender to send logs to the destination.
    /// For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestinationPolicy.html">PutDestinationPolicy</a>.</p>
    pub fn set_destination_arn(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_destination_arn(input),
            handle: self.handle,
        }
    }
    /// <p>The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log
    /// events to the destination stream. You don't need to provide the ARN when you are working with
    /// a logical destination for cross-account delivery.</p>
    pub fn role_arn(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.role_arn(inp),
            handle: self.handle,
        }
    }
    /// <p>The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log
    /// events to the destination stream. You don't need to provide the ARN when you are working with
    /// a logical destination for cross-account delivery.</p>
    pub fn set_role_arn(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_role_arn(input),
            handle: self.handle,
        }
    }
    /// <p>The method used to distribute log data to the destination. By default, log data is
    /// grouped by log stream, but the grouping can be set to random for a more even distribution.
    /// This property is only applicable when the destination is an Amazon Kinesis stream. </p>
    pub fn distribution(self, inp: crate::model::Distribution) -> Self {
        Self {
            inner: self.inner.distribution(inp),
            handle: self.handle,
        }
    }
    /// <p>The method used to distribute log data to the destination. By default, log data is
    /// grouped by log stream, but the grouping can be set to random for a more even distribution.
    /// This property is only applicable when the destination is an Amazon Kinesis stream. </p>
    pub fn set_distribution(
        self,
        input: std::option::Option<crate::model::Distribution>,
    ) -> Self {
        Self {
            inner: self.inner.set_distribution(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `StartQuery`.
///
/// <p>Schedules a query of a log group using CloudWatch Logs Insights. You specify the log group
/// and time range to query and the query string to use.</p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html">CloudWatch Logs Insights Query Syntax</a>.</p>
///
/// <p>Queries time out after 15 minutes of execution. If your queries are timing out, reduce the
/// time range being searched or partition your query into a number of queries.</p>
#[derive(std::fmt::Debug)]
pub struct StartQuery<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state (connector, middleware, retry policy, and config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates request parameters until `send` builds the operation input.
    inner: crate::input::start_query_input::Builder,
}
impl<C, M, R> StartQuery<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `StartQuery` fluent builder with empty input.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::StartQueryOutput,
        aws_smithy_http::result::SdkError<crate::error::StartQueryError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::StartQueryInputOperationOutputAlias,
            crate::output::StartQueryOutput,
            crate::error::StartQueryError,
            crate::input::StartQueryInputOperationRetryAlias,
        >,
    {
        // Build the input, then turn it into a dispatchable operation; failures in
        // either step surface as `ConstructionFailure` before any request is sent.
        let op = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The log group on which to perform the query.</p>
    /// <p>A <code>StartQuery</code> operation must include a <code>logGroupNames</code> or a <code>logGroupName</code> parameter, but
    /// not both.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.log_group_name(inp),
            handle: self.handle,
        }
    }
    /// <p>The log group on which to perform the query.</p>
    /// <p>A <code>StartQuery</code> operation must include a <code>logGroupNames</code> or a <code>logGroupName</code> parameter, but
    /// not both.</p>
    pub fn set_log_group_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_log_group_name(input),
            handle: self.handle,
        }
    }
    /// Appends a single item to `logGroupNames`.
    ///
    /// To override the contents of this collection use [`set_log_group_names`](Self::set_log_group_names).
    ///
    /// <p>The list of log groups to be queried. You can include up to 20 log groups.</p>
    /// <p>A <code>StartQuery</code> operation must include a <code>logGroupNames</code> or a <code>logGroupName</code> parameter, but
    /// not both.</p>
    pub fn log_group_names(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.log_group_names(inp),
            handle: self.handle,
        }
    }
    /// <p>The list of log groups to be queried. You can include up to 20 log groups.</p>
    /// <p>A <code>StartQuery</code> operation must include a <code>logGroupNames</code> or a <code>logGroupName</code> parameter, but
    /// not both.</p>
    pub fn set_log_group_names(
        self,
        input: std::option::Option<std::vec::Vec<std::string::String>>,
    ) -> Self {
        Self {
            inner: self.inner.set_log_group_names(input),
            handle: self.handle,
        }
    }
    /// <p>The beginning of the time range to query. The range is inclusive, so the specified
    /// start time is included in the query. Specified as epoch time, the
    /// number of seconds since January 1, 1970, 00:00:00 UTC.</p>
    pub fn start_time(self, inp: i64) -> Self {
        Self {
            inner: self.inner.start_time(inp),
            handle: self.handle,
        }
    }
    /// <p>The beginning of the time range to query. The range is inclusive, so the specified
    /// start time is included in the query. Specified as epoch time, the
    /// number of seconds since January 1, 1970, 00:00:00 UTC.</p>
    pub fn set_start_time(self, input: std::option::Option<i64>) -> Self {
        Self {
            inner: self.inner.set_start_time(input),
            handle: self.handle,
        }
    }
    /// <p>The end of the time range to query. The range is inclusive, so the specified
    /// end time is included in the query. Specified as epoch
    /// time, the number of seconds since January 1, 1970, 00:00:00 UTC.</p>
    pub fn end_time(self, inp: i64) -> Self {
        Self {
            inner: self.inner.end_time(inp),
            handle: self.handle,
        }
    }
    /// <p>The end of the time range to query. The range is inclusive, so the specified
    /// end time is included in the query. Specified as epoch
    /// time, the number of seconds since January 1, 1970, 00:00:00 UTC.</p>
    pub fn set_end_time(self, input: std::option::Option<i64>) -> Self {
        Self {
            inner: self.inner.set_end_time(input),
            handle: self.handle,
        }
    }
    /// <p>The query string to use.
    /// For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html">CloudWatch Logs Insights Query Syntax</a>.</p>
    pub fn query_string(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.query_string(inp),
            handle: self.handle,
        }
    }
    /// <p>The query string to use.
    /// For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html">CloudWatch Logs Insights Query Syntax</a>.</p>
    pub fn set_query_string(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_query_string(input),
            handle: self.handle,
        }
    }
    /// <p>The maximum number of log events to return in the query. If the query string uses the <code>fields</code> command,
    /// only the specified fields and their values are returned. The default is 1000.</p>
    pub fn limit(self, inp: i32) -> Self {
        Self {
            inner: self.inner.limit(inp),
            handle: self.handle,
        }
    }
    /// <p>The maximum number of log events to return in the query. If the query string uses the <code>fields</code> command,
    /// only the specified fields and their values are returned. The default is 1000.</p>
    pub fn set_limit(self, input: std::option::Option<i32>) -> Self {
        Self {
            inner: self.inner.set_limit(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `StopQuery`.
///
/// <p>Stops a CloudWatch Logs Insights query that is in progress. If the query has already ended, the operation
/// returns an error indicating that the specified query is not running.</p>
#[derive(std::fmt::Debug)]
pub struct StopQuery<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state (connector, middleware, retry policy, and config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates request parameters until `send` builds the operation input.
    inner: crate::input::stop_query_input::Builder,
}
impl<C, M, R> StopQuery<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `StopQuery` fluent builder with empty input.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::StopQueryOutput,
        aws_smithy_http::result::SdkError<crate::error::StopQueryError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::StopQueryInputOperationOutputAlias,
            crate::output::StopQueryOutput,
            crate::error::StopQueryError,
            crate::input::StopQueryInputOperationRetryAlias,
        >,
    {
        // Build the input, then turn it into a dispatchable operation; failures in
        // either step surface as `ConstructionFailure` before any request is sent.
        let op = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The ID number of the query to stop. To find this ID number, use
    /// <code>DescribeQueries</code>.</p>
    pub fn query_id(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.query_id(inp),
            handle: self.handle,
        }
    }
    /// <p>The ID number of the query to stop. To find this ID number, use
    /// <code>DescribeQueries</code>.</p>
    pub fn set_query_id(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_query_id(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `TagLogGroup`.
///
/// <p>Adds or updates the specified tags for the specified log group.</p>
/// <p>To list the tags for a log group, use <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsLogGroup.html">ListTagsLogGroup</a>.
/// To remove tags, use <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_UntagLogGroup.html">UntagLogGroup</a>.</p>
/// <p>For more information about tags, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html#log-group-tagging">Tag Log Groups in Amazon CloudWatch Logs</a>
/// in the <i>Amazon CloudWatch Logs User Guide</i>.</p>
/// <p>CloudWatch Logs doesn’t support IAM policies that prevent users from assigning specified tags to
/// log groups using the <code>aws:Resource/<i>key-name</i>
/// </code> or <code>aws:TagKeys</code> condition keys.
/// For more information about using tags to control access, see
/// <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html">Controlling access to Amazon Web Services resources using tags</a>.</p>
#[derive(std::fmt::Debug)]
pub struct TagLogGroup<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state (connector, middleware, retry policy, and config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates request parameters until `send` builds the operation input.
    inner: crate::input::tag_log_group_input::Builder,
}
impl<C, M, R> TagLogGroup<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `TagLogGroup` fluent builder with empty input.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::TagLogGroupOutput,
        aws_smithy_http::result::SdkError<crate::error::TagLogGroupError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::TagLogGroupInputOperationOutputAlias,
            crate::output::TagLogGroupOutput,
            crate::error::TagLogGroupError,
            crate::input::TagLogGroupInputOperationRetryAlias,
        >,
    {
        // Build the input, then turn it into a dispatchable operation; failures in
        // either step surface as `ConstructionFailure` before any request is sent.
        let op = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The name of the log group.</p>
    pub fn log_group_name(self, inp: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.log_group_name(inp),
            handle: self.handle,
        }
    }
    /// <p>The name of the log group.</p>
    pub fn set_log_group_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_log_group_name(input),
            handle: self.handle,
        }
    }
    /// Adds a single key-value pair to `tags`.
    ///
    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
    ///
    /// <p>The key-value pairs to use for the tags.</p>
    pub fn tags(
        self,
        k: impl Into<std::string::String>,
        v: impl Into<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.tags(k, v),
            handle: self.handle,
        }
    }
    /// <p>The key-value pairs to use for the tags.</p>
    pub fn set_tags(
        self,
        input: std::option::Option<
            std::collections::HashMap<std::string::String, std::string::String>,
        >,
    ) -> Self {
        Self {
            inner: self.inner.set_tags(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `TestMetricFilter`.
///
/// <p>Tests the filter pattern of a metric filter against a sample of log event messages. You
/// can use this operation to validate the correctness of a metric filter pattern.</p>
#[derive(std::fmt::Debug)]
pub struct TestMetricFilter<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state (connector, middleware, retry policy, and config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates request parameters until `send` builds the operation input.
    inner: crate::input::test_metric_filter_input::Builder,
}
impl<C, M, R> TestMetricFilter<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `TestMetricFilter`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::TestMetricFilterOutput,
aws_smithy_http::result::SdkError<crate::error::TestMetricFilterError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::TestMetricFilterInputOperationOutputAlias,
crate::output::TestMetricFilterOutput,
crate::error::TestMetricFilterError,
crate::input::TestMetricFilterInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>A symbolic description of how CloudWatch Logs should interpret the data in each log
/// event. For example, a log event can contain timestamps, IP addresses, strings, and so on. You
/// use the filter pattern to specify what to look for in the log event message.</p>
pub fn filter_pattern(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.filter_pattern(inp);
self
}
/// <p>A symbolic description of how CloudWatch Logs should interpret the data in each log
/// event. For example, a log event can contain timestamps, IP addresses, strings, and so on. You
/// use the filter pattern to specify what to look for in the log event message.</p>
pub fn set_filter_pattern(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_filter_pattern(input);
self
}
/// Appends an item to `logEventMessages`.
///
/// To override the contents of this collection use [`set_log_event_messages`](Self::set_log_event_messages).
///
/// <p>The log event messages to test.</p>
pub fn log_event_messages(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.log_event_messages(inp);
self
}
/// <p>The log event messages to test.</p>
pub fn set_log_event_messages(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_log_event_messages(input);
self
}
}
/// Fluent builder constructing a request to `UntagLogGroup`.
///
/// <p>Removes the specified tags from the specified log group.</p>
/// <p>To list the tags for a log group, use <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsLogGroup.html">ListTagsLogGroup</a>.
/// To add tags, use <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_TagLogGroup.html">TagLogGroup</a>.</p>
/// <p>CloudWatch Logs doesn’t support IAM policies that prevent users from assigning specified tags to
/// log groups using the <code>aws:Resource/<i>key-name</i>
/// </code> or <code>aws:TagKeys</code> condition keys.
/// </p>
#[derive(std::fmt::Debug)]
pub struct UntagLogGroup<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client + service configuration, common to all fluent builders.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Generated input builder that accumulates the request fields.
    inner: crate::input::untag_log_group_input::Builder,
}
impl<C, M, R> UntagLogGroup<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `UntagLogGroup`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::UntagLogGroupOutput,
        aws_smithy_http::result::SdkError<crate::error::UntagLogGroupError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::UntagLogGroupInputOperationOutputAlias,
            crate::output::UntagLogGroupOutput,
            crate::error::UntagLogGroupError,
            crate::input::UntagLogGroupInputOperationRetryAlias,
        >,
    {
        // Build the input, then assemble and dispatch the operation; builder
        // and assembly failures are reported as `ConstructionFailure`.
        let input = self.inner.build().map_err(|err| {
            aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
        })?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The name of the log group.</p>
    pub fn log_group_name(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.log_group_name(inp);
        self
    }
    /// <p>The name of the log group.</p>
    pub fn set_log_group_name(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_log_group_name(input);
        self
    }
    /// Appends an item to `tags`.
    ///
    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
    ///
    /// <p>The tag keys. The corresponding tags are removed from the log group.</p>
    pub fn tags(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.tags(inp);
        self
    }
    /// <p>The tag keys. The corresponding tags are removed from the log group.</p>
    pub fn set_tags(
        mut self,
        input: std::option::Option<std::vec::Vec<std::string::String>>,
    ) -> Self {
        self.inner = self.inner.set_tags(input);
        self
    }
}
}
impl<C> Client<C, aws_hyper::AwsMiddleware, aws_smithy_client::retry::Standard> {
    /// Creates a client with the given service config and connector override.
    pub fn from_conf_conn(conf: crate::Config, conn: C) -> Self {
        // Fall back to default retry/timeout behavior when the config leaves
        // those fields unset.
        let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default();
        let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default();
        let sleep_impl = conf.sleep_impl.clone();
        let mut client = aws_hyper::Client::new(conn)
            .with_retry_config(retry_config.into())
            .with_timeout_config(timeout_config);
        client.set_sleep_impl(sleep_impl);
        Self {
            handle: std::sync::Arc::new(Handle { client, conf }),
        }
    }
}
impl
    Client<
        aws_smithy_client::erase::DynConnector,
        aws_hyper::AwsMiddleware,
        aws_smithy_client::retry::Standard,
    >
{
    /// Creates a new client from a shared config.
    #[cfg(any(feature = "rustls", feature = "native-tls"))]
    pub fn new(config: &aws_types::config::Config) -> Self {
        Self::from_conf(config.into())
    }
    /// Creates a new client from the service [`Config`](crate::Config).
    #[cfg(any(feature = "rustls", feature = "native-tls"))]
    pub fn from_conf(conf: crate::Config) -> Self {
        // Mirrors `from_conf_conn`, but builds the default HTTPS connector
        // instead of accepting a caller-supplied one.
        let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default();
        let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default();
        let sleep_impl = conf.sleep_impl.clone();
        let mut client = aws_hyper::Client::https()
            .with_retry_config(retry_config.into())
            .with_timeout_config(timeout_config);
        client.set_sleep_impl(sleep_impl);
        Self {
            handle: std::sync::Arc::new(Handle { client, conf }),
        }
    }
}
| 47.617559 | 327 | 0.603366 |
c1815330a6a127a378dd2851248aeb4080ff75ec | 18,029 | // Copyright 2019 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! An environment which is passed downward into the widget tree.
use std::any;
use std::borrow::Borrow;
use std::collections::HashMap;
use std::fmt::{Debug, Formatter};
use std::marker::PhantomData;
use std::ops::Deref;
use std::sync::Arc;
use crate::localization::L10nManager;
use crate::{Color, Data, Point, Rect, Size};
/// An environment passed down through all widget traversals.
///
/// All widget methods have access to an environment, and it is passed
/// downwards during traversals.
///
/// A widget can retrieve theme parameters (colors, dimensions, etc.). In
/// addition, it can pass custom data down to all descendants. An important
/// example of the latter is setting a value for enabled/disabled status
/// so that an entire subtree can be disabled ("grayed out") with one
/// setting.
///
/// [`EnvScope`] can be used to override parts of `Env` for its descendants.
///
/// # Important
/// It is the programmer's responsibility to ensure that the environment
/// is used correctly. See [`Key`] for an example.
/// - [`Key`]s should be `const`s with unique names
/// - [`Key`]s must always be set before they are used.
/// - Values can only be overwritten by values of the same type.
///
/// [`EnvScope`]: widget/struct.EnvScope.html
/// [`Key`]: struct.Key.html
#[derive(Clone)]
pub struct Env(Arc<EnvImpl>);

#[derive(Clone)]
struct EnvImpl {
    // Key string -> stored value; the sole data consulted for lookups.
    map: HashMap<String, Value>,
    // Palette used by `get_debug_color` for debug layout painting.
    debug_colors: Vec<Color>,
    // Localization resources, shared across clones of the Env.
    l10n: Arc<L10nManager>,
}
/// A typed [`Env`] key.
///
/// This lets you retrieve values of a given type. The parameter
/// implements [`ValueType`]. For "expensive" types, this is a reference,
/// so the type for a string is `Key<&str>`.
///
/// # Examples
///
/// ```
///# use druid::{Key, Color, WindowDesc, AppLauncher, widget::Label};
/// const IMPORTANT_LABEL_COLOR: Key<Color> = Key::new("my-app.important-label-color");
///
/// fn important_label() -> Label<()> {
/// Label::new("Warning!").with_text_color(IMPORTANT_LABEL_COLOR)
/// }
///
/// fn main() {
/// let main_window = WindowDesc::new(important_label);
///
/// AppLauncher::with_window(main_window)
/// .configure_env(|env, _state| {
/// // The `Key` must be set before it is used.
/// env.set(IMPORTANT_LABEL_COLOR, Color::rgb(1.0, 0.0, 0.0));
/// });
/// }
/// ```
///
/// [`ValueType`]: trait.ValueType.html
/// [`Env`]: struct.Env.html
pub struct Key<T> {
    // The unique string identifier used for map lookups.
    key: &'static str,
    // Zero-sized marker carrying the value type for compile-time checking.
    value_type: PhantomData<T>,
}
// we could do some serious deriving here: the set of types that can be stored
// could be defined per-app
// Also consider Box<Any> (though this would also impact debug).
/// A dynamic type representing all values that can be stored in an environment.
#[derive(Clone)]
// ANCHOR: value_type
pub enum Value {
    Point(Point),
    Size(Size),
    Rect(Rect),
    Color(Color),
    Float(f64),
    Bool(bool),
    UnsignedInt(u64),
    String(String),
}
// ANCHOR_END: value_type
/// Either a concrete `T` or a [`Key<T>`] that can be resolved in the [`Env`].
///
/// This is a way to allow widgets to interchangeably use either a specific
/// value or a value from the environment for some purpose.
///
/// [`Key<T>`]: struct.Key.html
/// [`Env`]: struct.Env.html
pub enum KeyOrValue<T> {
    // A value supplied directly, stored in its type-erased `Value` form.
    Concrete(Value),
    // A key to be looked up in the `Env` at resolve time.
    Key(Key<T>),
}
/// Values which can be stored in an environment.
///
/// Note that for "expensive" types this is the reference. For example,
/// for strings, this trait is implemented on `&'a str`. The trait is
/// parametrized on a lifetime so that it can be used for references in
/// this way.
pub trait ValueType<'a>: Sized {
    /// The corresponding owned type.
    type Owned: Into<Value>;

    /// Attempt to convert the generic `Value` into this type.
    ///
    /// Returns a [`ValueTypeError`] when `v` holds a different variant.
    fn try_from_value(v: &'a Value) -> Result<Self, ValueTypeError>;
}
/// The error type for environment access.
///
/// This error is expected to happen rarely, if ever, as it only
/// happens when the string part of keys collide but the types
/// mismatch.
#[derive(Debug, Clone)]
pub struct ValueTypeError {
    // The name of the type the caller asked for.
    expected: &'static str,
    // The value that was actually stored under the key.
    found: Value,
}
impl Env {
    /// State for whether or not to paint colorful rectangles for layout
    /// debugging.
    ///
    /// Set by the `debug_paint_layout()` method on [`WidgetExt`]'.
    ///
    /// [`WidgetExt`]: trait.WidgetExt.html
    pub(crate) const DEBUG_PAINT: Key<bool> = Key::new("druid.built-in.debug-paint");

    /// A key used to tell widgets to print additional debug information.
    ///
    /// This does nothing by default; however you can check this key while
    /// debugging a widget to limit println spam.
    ///
    /// For convenience, this key can be set with the [`WidgetExt::debug_widget`]
    /// method.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use druid::Env;
    /// # let env = Env::default();
    /// # let widget_id = 0;
    /// # let my_rect = druid::Rect::ZERO;
    /// if env.get(Env::DEBUG_WIDGET) {
    ///     eprintln!("widget {:?} bounds: {:?}", widget_id, my_rect);
    /// }
    /// ```
    ///
    /// [`WidgetExt::debug_widget`]: trait.WidgetExt.html#method.debug_widget
    pub const DEBUG_WIDGET: Key<bool> = Key::new("druid.built-in.debug-widget");

    /// Gets a value from the environment, expecting it to be present.
    ///
    /// Note that the return value is a reference for "expensive" types such
    /// as strings, but an ordinary value for "cheap" types such as numbers
    /// and colors.
    ///
    /// # Panics
    ///
    /// Panics if the key is not found, or if it is present with the wrong type.
    pub fn get<'a, V: ValueType<'a>>(&'a self, key: impl Borrow<Key<V>>) -> V {
        let key = key.borrow();
        if let Some(value) = self.0.map.get(key.key) {
            // Panics (inside `to_inner_unchecked`) on a type mismatch.
            value.to_inner_unchecked()
        } else {
            panic!("key for {} not found", key.key)
        }
    }

    /// Gets a value from the environment.
    ///
    /// # Panics
    ///
    /// Panics if the value for the key is found, but has the wrong type.
    pub fn try_get<'a, V: ValueType<'a>>(&'a self, key: impl Borrow<Key<V>>) -> Option<V> {
        self.0
            .map
            .get(key.borrow().key)
            .map(|value| value.to_inner_unchecked())
    }

    /// Gets a value from the environment, in its encapsulated [`Value`] form,
    /// expecting the key to be present.
    ///
    /// *WARNING:* This is not intended for general use, but only for inspecting an `Env` e.g.
    /// for debugging, theme editing, and theme loading.
    ///
    /// # Panics
    ///
    /// Panics if the key is not found
    /// [`Value`]: enum.Value.html
    pub fn get_untyped(&self, key: impl Borrow<Key<()>>) -> &Value {
        let key = key.borrow();
        if let Some(value) = self.0.map.get(key.key) {
            value
        } else {
            panic!("key for {} not found", key.key)
        }
    }

    /// Gets a value from the environment, in its encapsulated [`Value`] form,
    /// returning None if a value isn't found.
    ///
    /// *WARNING:* This is not intended for general use, but only for inspecting an `Env` e.g.
    /// for debugging, theme editing, and theme loading.
    /// [`Value`]: enum.Value.html
    pub fn try_get_untyped(&self, key: impl Borrow<Key<()>>) -> Option<&Value> {
        self.0.map.get(key.borrow().key)
    }

    /// Gets the entire contents of the `Env`, in key-value pairs.
    ///
    /// *WARNING:* This is not intended for general use, but only for inspecting an `Env` e.g.
    /// for debugging, theme editing, and theme loading.
    pub fn get_all(&self) -> impl ExactSizeIterator<Item = (&String, &Value)> {
        self.0.map.iter()
    }

    /// Adds a key/value, acting like a builder.
    pub fn adding<'a, V: ValueType<'a>>(mut self, key: Key<V>, value: impl Into<V::Owned>) -> Env {
        // Copy-on-write: clone the inner data only if this Arc is shared.
        let env = Arc::make_mut(&mut self.0);
        env.map.insert(key.into(), value.into().into());
        self
    }

    /// Sets a value in an environment.
    ///
    /// # Panics
    ///
    /// Panics if the environment already has a value for the key, but it is
    /// of a different type.
    pub fn set<'a, V: ValueType<'a>>(&'a mut self, key: Key<V>, value: impl Into<V::Owned>) {
        let env = Arc::make_mut(&mut self.0);
        let value = value.into().into();
        let key = key.into();
        // TODO: use of Entry might be more efficient
        // Reject overwrites that would change the stored variant; silently
        // changing a key's type would break later typed `get` calls.
        if let Some(existing) = env.map.get(&key) {
            if !existing.is_same_type(&value) {
                panic!(
                    "Invalid type for key '{}': {:?} differs in kind from {:?}",
                    key, existing, value
                );
            }
        }
        env.map.insert(key, value);
    }

    /// Returns a reference to the [`L10nManager`], which handles localization
    /// resources.
    ///
    /// [`L10nManager`]: struct.L10nManager.html
    pub(crate) fn localization_manager(&self) -> &L10nManager {
        &self.0.l10n
    }

    /// Given an id, returns one of 18 distinct colors
    #[doc(hidden)]
    pub fn get_debug_color(&self, id: u64) -> Color {
        // Wrap around the palette so any id maps to some color.
        let color_num = id as usize % self.0.debug_colors.len();
        self.0.debug_colors[color_num].clone()
    }
}
impl<T> Key<T> {
    /// Create a new strongly typed `Key` with the given string value.
    /// The type of the key will be inferred.
    ///
    /// # Examples
    ///
    /// ```
    /// use druid::Key;
    /// use druid::piet::Color;
    ///
    /// let float_key: Key<f64> = Key::new("a.very.good.float");
    /// let color_key: Key<Color> = Key::new("a.very.nice.color");
    /// ```
    // `const fn` so keys can be declared as `const` items, as in the example.
    pub const fn new(key: &'static str) -> Self {
        Key {
            key,
            value_type: PhantomData,
        }
    }
}

impl Key<()> {
    /// Create an untyped `Key` with the given string value.
    ///
    /// *WARNING:* This is not for general usage - it's only useful
    /// for inspecting the contents of an [`Env`] - this is expected to be
    /// used for debugging, loading, and manipulating themes.
    ///
    /// [`Env`]: struct.Env.html
    pub const fn untyped(key: &'static str) -> Self {
        Key {
            key,
            value_type: PhantomData,
        }
    }
}
impl Value {
/// Get a reference to the inner object.
///
/// # Panics
///
/// Panics when the value variant doesn't match the provided type.
pub fn to_inner_unchecked<'a, V: ValueType<'a>>(&'a self) -> V {
match ValueType::try_from_value(self) {
Ok(v) => v,
Err(s) => panic!("{}", s),
}
}
fn is_same_type(&self, other: &Value) -> bool {
use Value::*;
match (self, other) {
(Point(_), Point(_)) => true,
(Size(_), Size(_)) => true,
(Rect(_), Rect(_)) => true,
(Color(_), Color(_)) => true,
(Float(_), Float(_)) => true,
(Bool(_), Bool(_)) => true,
(UnsignedInt(_), UnsignedInt(_)) => true,
(String(_), String(_)) => true,
_ => false,
}
}
}
impl Debug for Value {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
match self {
Value::Point(p) => write!(f, "Point {:?}", p),
Value::Size(s) => write!(f, "Size {:?}", s),
Value::Rect(r) => write!(f, "Rect {:?}", r),
Value::Color(c) => write!(f, "Color {:?}", c),
Value::Float(x) => write!(f, "Float {}", x),
Value::Bool(b) => write!(f, "Bool {}", b),
Value::UnsignedInt(x) => write!(f, "UnsignedInt {}", x),
Value::String(s) => write!(f, "String {:?}", s),
}
}
}
impl Data for Value {
    /// Two `Value`s are the same when they hold the same variant and their
    /// payloads compare equal under each payload type's `Data` semantics.
    fn same(&self, other: &Value) -> bool {
        use Value::*;
        match (self, other) {
            (Point(a), Point(b)) => a.x.same(&b.x) && a.y.same(&b.y),
            (Rect(a), Rect(b)) => {
                a.x0.same(&b.x0) && a.y0.same(&b.y0) && a.x1.same(&b.x1) && a.y1.same(&b.y1)
            }
            (Size(a), Size(b)) => a.width.same(&b.width) && a.height.same(&b.height),
            (Color(a), Color(b)) => a.as_rgba_u32() == b.as_rgba_u32(),
            (Float(a), Float(b)) => a.same(b),
            (Bool(a), Bool(b)) => a == b,
            (UnsignedInt(a), UnsignedInt(b)) => a.same(b),
            (String(a), String(b)) => a == b,
            // Different variants are never the same.
            _ => false,
        }
    }
}
impl Data for Env {
    fn same(&self, other: &Env) -> bool {
        // Fast path: shared Arc means identical contents; otherwise compare deeply.
        Arc::ptr_eq(&self.0, &other.0) || self.0.deref().same(other.0.deref())
    }
}
impl Data for EnvImpl {
    fn same(&self, other: &EnvImpl) -> bool {
        // Only the key/value map participates in equality; `debug_colors` and
        // `l10n` are excluded here — presumably intentional, TODO confirm.
        self.map.len() == other.map.len()
            && self
                .map
                .iter()
                .all(|(k, v1)| other.map.get(k).map(|v2| v1.same(v2)).unwrap_or(false))
    }
}
impl Default for Env {
    fn default() -> Self {
        let l10n = L10nManager::new(vec!["builtin.ftl".into()], "./resources/i18n/");

        // Colors are from https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/
        // They're picked for visual distinction and accessibility (99 percent)
        let debug_colors = vec![
            Color::rgb8(230, 25, 75),
            Color::rgb8(60, 180, 75),
            Color::rgb8(255, 225, 25),
            Color::rgb8(0, 130, 200),
            Color::rgb8(245, 130, 48),
            Color::rgb8(70, 240, 240),
            Color::rgb8(240, 50, 230),
            Color::rgb8(250, 190, 190),
            Color::rgb8(0, 128, 128),
            Color::rgb8(230, 190, 255),
            Color::rgb8(170, 110, 40),
            Color::rgb8(255, 250, 200),
            Color::rgb8(128, 0, 0),
            Color::rgb8(170, 255, 195),
            Color::rgb8(0, 0, 128),
            Color::rgb8(128, 128, 128),
            Color::rgb8(255, 255, 255),
            Color::rgb8(0, 0, 0),
        ];

        let inner = EnvImpl {
            l10n: Arc::new(l10n),
            map: HashMap::new(),
            debug_colors,
        };

        // Pre-populate the built-in debug keys so `get` on them never panics.
        Env(Arc::new(inner))
            .adding(Env::DEBUG_PAINT, false)
            .adding(Env::DEBUG_WIDGET, false)
    }
}
// A `Key` converts to its underlying string id, used for map insertion.
impl<T> From<Key<T>> for String {
    fn from(src: Key<T>) -> String {
        String::from(src.key)
    }
}

impl ValueTypeError {
    fn new(expected: &'static str, found: Value) -> ValueTypeError {
        ValueTypeError { expected, found }
    }
}

impl std::fmt::Display for ValueTypeError {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(
            f,
            "Incorrect value type: expected {} found {:?}",
            self.expected, self.found
        )
    }
}

// Marker impl so the error can be boxed as `dyn Error`.
impl std::error::Error for ValueTypeError {}
/// Use this macro for types which are cheap to clone (ie all `Copy` types).
///
/// Expands to a [`ValueType`] impl (extracting the matching [`Value`]
/// variant by value) plus a `From<$ty> for Value` conversion.
macro_rules! impl_value_type_owned {
    ($ty:ty, $var:ident) => {
        impl<'a> ValueType<'a> for $ty {
            type Owned = $ty;
            fn try_from_value(value: &Value) -> Result<Self, ValueTypeError> {
                match value {
                    Value::$var(f) => Ok(f.to_owned()),
                    other => Err(ValueTypeError::new(any::type_name::<$ty>(), other.clone())),
                }
            }
        }
        // Implement `From` rather than `Into`: the standard library's blanket
        // impl then provides `Into<Value>` for free, so every existing
        // `.into()` call site keeps working (clippy::from_over_into).
        impl From<$ty> for Value {
            fn from(v: $ty) -> Value {
                Value::$var(v)
            }
        }
    };
}
/// Use this macro for types which require allocation but are not too
/// expensive to clone.
///
/// Expands to a [`ValueType`] impl for the *borrowed* form (`&$ty`) plus a
/// `From<$owned> for Value` conversion for the owned form.
macro_rules! impl_value_type_borrowed {
    ($ty:ty, $owned:ty, $var:ident) => {
        impl<'a> ValueType<'a> for &'a $ty {
            type Owned = $owned;
            fn try_from_value(value: &'a Value) -> Result<Self, ValueTypeError> {
                match value {
                    Value::$var(f) => Ok(f),
                    other => Err(ValueTypeError::new(any::type_name::<$ty>(), other.clone())),
                }
            }
        }
        // Implement `From` rather than `Into`: the standard library's blanket
        // impl then provides `Into<Value>` for free, so every existing
        // `.into()` call site keeps working (clippy::from_over_into).
        impl From<$owned> for Value {
            fn from(v: $owned) -> Value {
                Value::$var(v)
            }
        }
    };
}
// Wire each supported payload type to its `Value` variant.
impl_value_type_owned!(f64, Float);
impl_value_type_owned!(bool, Bool);
impl_value_type_owned!(u64, UnsignedInt);
impl_value_type_owned!(Color, Color);
impl_value_type_owned!(Rect, Rect);
impl_value_type_owned!(Point, Point);
impl_value_type_owned!(Size, Size);
// Strings are retrieved as `&str` but stored/owned as `String`.
impl_value_type_borrowed!(str, String, String);
impl<'a, T: ValueType<'a>> KeyOrValue<T> {
    /// Resolves to a concrete `T`: a stored concrete value is returned
    /// directly, a stored key is looked up in `env`.
    ///
    /// # Panics
    ///
    /// Panics on a missing key or a type mismatch, like [`Env::get`].
    pub fn resolve(&'a self, env: &'a Env) -> T {
        match self {
            KeyOrValue::Concrete(value) => value.to_inner_unchecked(),
            KeyOrValue::Key(key) => env.get(key),
        }
    }
}
// An owned value converts into the `Concrete` variant (type-erased).
impl<'a, V: Into<Value>, T: ValueType<'a, Owned = V>> From<V> for KeyOrValue<T> {
    fn from(value: V) -> KeyOrValue<T> {
        KeyOrValue::Concrete(value.into())
    }
}

// A key converts into the `Key` variant, deferring lookup to resolve time.
impl<'a, T: ValueType<'a>> From<Key<T>> for KeyOrValue<T> {
    fn from(key: Key<T>) -> KeyOrValue<T> {
        KeyOrValue::Key(key)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Checks that a key and an owned string both resolve to the same value,
    // whether accessed directly or through `KeyOrValue`.
    #[test]
    fn string_key_or_value() {
        const MY_KEY: Key<&str> = Key::new("test.my-string-key");
        let env = Env::default().adding(MY_KEY, "Owned".to_string());
        assert_eq!(env.get(MY_KEY), "Owned");

        let key: KeyOrValue<&str> = MY_KEY.into();
        let value: KeyOrValue<&str> = "Owned".to_string().into();
        assert_eq!(key.resolve(&env), value.resolve(&env));
    }
}
| 32.194643 | 100 | 0.563481 |
8fd5c1244af1343e1eec7fc988ed639c0b2f874b | 313 | //! Infrastructure containing device configuration used for computations.
mod graphical_pass;
mod builder;
pub use graphical_pass::*;
pub use builder::{GraphicalPassBuilder, PrimitiveTopology, StoreOp, LoadOp};
pub use vulkano::descriptor::descriptor_set::{FixedSizeDescriptorSet, FixedSizeDescriptorSetsPool};
| 34.777778 | 99 | 0.824281 |
037ad484944a5c290099d59e03d01f4cf50f88f7 | 4,737 | wit_bindgen_wasmer::export!("./tests/runtime/handles/imports.wit");
use anyhow::Result;
use imports::*;
use std::cell::RefCell;
use wasmer::WasmerEnv;
#[derive(Default, WasmerEnv, Clone)]
pub struct MyImports {
    // Set to true by `drop_host_state2`; reported via `host_state2_saw_close`.
    host_state2_closed: bool,
}

// Host-side resource behind the `host-state` handle; wraps a test value.
#[derive(Debug)]
pub struct SuchState(u32);

// Host-side resource behind the `markdown2` handle; accumulates appended text.
#[derive(Default, Debug)]
pub struct Markdown {
    buf: RefCell<String>,
}
// Host implementation of the generated `imports.wit` interface. Most methods
// return fixed values so the guest-side test can assert against them.
impl Imports for MyImports {
    type HostState = SuchState;
    type HostState2 = ();
    type Markdown2 = Markdown;
    type OddName = ();

    fn host_state_create(&mut self) -> SuchState {
        SuchState(100)
    }

    fn host_state_get(&mut self, state: &SuchState) -> u32 {
        state.0
    }

    fn host_state2_create(&mut self) {}

    fn host_state2_saw_close(&mut self) -> bool {
        self.host_state2_closed
    }

    // Called when the guest drops its handle; records the close for the test.
    fn drop_host_state2(&mut self, _state: ()) {
        self.host_state2_closed = true;
    }

    fn two_host_states(&mut self, _a: &SuchState, _b: &()) -> (SuchState, ()) {
        (SuchState(2), ())
    }

    fn host_state2_param_record(&mut self, _a: HostStateParamRecord<'_, Self>) {}
    fn host_state2_param_tuple(&mut self, _a: (&'_ (),)) {}
    fn host_state2_param_option(&mut self, _a: Option<&'_ ()>) {}
    fn host_state2_param_result(&mut self, _a: Result<&'_ (), u32>) {}
    fn host_state2_param_variant(&mut self, _a: HostStateParamVariant<'_, Self>) {}
    fn host_state2_param_list(&mut self, _a: Vec<&()>) {}

    fn host_state2_result_record(&mut self) -> HostStateResultRecord<Self> {
        HostStateResultRecord { a: () }
    }

    fn host_state2_result_tuple(&mut self) -> ((),) {
        ((),)
    }

    fn host_state2_result_option(&mut self) -> Option<()> {
        Some(())
    }

    fn host_state2_result_result(&mut self) -> Result<(), u32> {
        Ok(())
    }

    fn host_state2_result_variant(&mut self) -> HostStateResultVariant<Self> {
        HostStateResultVariant::V0(())
    }

    fn host_state2_result_list(&mut self) -> Vec<()> {
        vec![(), ()]
    }

    fn markdown2_create(&mut self) -> Markdown {
        Markdown::default()
    }

    fn markdown2_append(&mut self, md: &Markdown, buf: &str) {
        md.buf.borrow_mut().push_str(buf);
    }

    // "Renders" by rewriting "red" to "green" so the guest can observe
    // host-side processing of the buffer.
    fn markdown2_render(&mut self, md: &Markdown) -> String {
        md.buf.borrow().replace("red", "green")
    }

    fn odd_name_create(&mut self) {}
    fn odd_name_frob_the_odd(&mut self, _: &()) {}
}
wit_bindgen_wasmer::import!("./tests/runtime/handles/exports.wit");
// Drives the guest module through the handle-passing test scenarios:
// instantiate with `MyImports`, then exercise create/get/drop for every
// handle shape (records, tuples, options, results, variants, lists).
fn run(wasm: &str) -> Result<()> {
    use exports::*;
    let exports = crate::instantiate(
        wasm,
        |store, import_object| imports::add_to_imports(store, import_object, MyImports::default()),
        |store, module, import_object| exports::Exports::instantiate(store, module, import_object),
    )?;

    exports.test_imports()?;

    // Basic create/get/drop round trip.
    let s: WasmState = exports.wasm_state_create()?;
    assert_eq!(exports.wasm_state_get_val(&s)?, 100);
    exports.drop_wasm_state(s)?;

    // Dropping a WasmState2 must be observable through `saw_close`.
    assert_eq!(exports.wasm_state2_saw_close()?, false);
    let s: WasmState2 = exports.wasm_state2_create()?;
    assert_eq!(exports.wasm_state2_saw_close()?, false);
    exports.drop_wasm_state2(s)?;
    assert_eq!(exports.wasm_state2_saw_close()?, true);

    // Handles can be passed and returned in pairs.
    let a = exports.wasm_state_create()?;
    let b = exports.wasm_state2_create()?;
    let (s1, s2) = exports.two_wasm_states(&a, &b)?;
    exports.drop_wasm_state(a)?;
    exports.drop_wasm_state(s1)?;
    exports.drop_wasm_state2(b)?;

    // Handles embedded in every supported parameter shape.
    exports.wasm_state2_param_record(WasmStateParamRecord { a: &s2 })?;
    exports.wasm_state2_param_tuple((&s2,))?;
    exports.wasm_state2_param_option(Some(&s2))?;
    exports.wasm_state2_param_option(None)?;
    exports.wasm_state2_param_result(Ok(&s2))?;
    exports.wasm_state2_param_result(Err(2))?;
    exports.wasm_state2_param_variant(WasmStateParamVariant::V0(&s2))?;
    exports.wasm_state2_param_variant(WasmStateParamVariant::V1(2))?;
    exports.wasm_state2_param_list(&[])?;
    exports.wasm_state2_param_list(&[&s2])?;
    exports.wasm_state2_param_list(&[&s2, &s2])?;
    exports.drop_wasm_state2(s2)?;

    // Handles embedded in every supported result shape; each returned handle
    // must be dropped explicitly.
    let s = exports.wasm_state2_result_record()?.a;
    exports.drop_wasm_state2(s)?;
    let s = exports.wasm_state2_result_tuple()?.0;
    exports.drop_wasm_state2(s)?;
    let s = exports.wasm_state2_result_option()?.unwrap();
    exports.drop_wasm_state2(s)?;
    let s = exports.wasm_state2_result_result()?.unwrap();
    match exports.wasm_state2_result_variant()? {
        WasmStateResultVariant::V0(s) => exports.drop_wasm_state2(s)?,
        WasmStateResultVariant::V1(_) => panic!(),
    }
    exports.drop_wasm_state2(s)?;
    for s in exports.wasm_state2_result_list()? {
        exports.drop_wasm_state2(s)?;
    }
    Ok(())
}
| 31.370861 | 99 | 0.654 |
e25d2a76412ba1a4866b05cc7b60367f8a86954e | 6,257 | use crate::common::{Error, Result};
use crate::table::format::ChecksumType::CRC32c;
use crate::util::{decode_fixed_uint32, encode_var_uint64, get_var_uint32, get_var_uint64};
pub const MAX_BLOCK_SIZE_SUPPORTED_BY_HASH_INDEX: usize = 1usize << 16;
/// Location of a contiguous region within a table file: a byte `offset`
/// plus a byte `size`, serialized as two varints.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
pub struct BlockHandle {
    pub offset: u64,
    pub size: u64,
}

impl BlockHandle {
    pub fn new(offset: u64, size: u64) -> BlockHandle {
        Self { offset, size }
    }

    /// Appends the varint-encoded `offset` then `size` to `data`.
    pub fn encode_to(&self, data: &mut Vec<u8>) {
        // 20 bytes is enough for two maximal (10-byte) u64 varints.
        let mut tmp: [u8; 20] = [0u8; 20];
        let offset1 = encode_var_uint64(&mut tmp, self.offset);
        let offset2 = encode_var_uint64(&mut tmp[offset1..], self.size);
        data.extend_from_slice(&tmp[..(offset1 + offset2)]);
    }

    /// Parses `offset` then `size` from the front of `data`, returning the
    /// number of bytes consumed.
    ///
    /// NOTE(review): the second call reads from `&data[offset..]` while
    /// reusing the same `offset` accumulator — assumes `get_var_uint64`
    /// adds the consumed length to `*offset`; verify against `util`.
    pub fn decode_from(&mut self, data: &[u8]) -> Result<usize> {
        let mut offset = 0;
        match get_var_uint64(data, &mut offset) {
            None => return Err(Error::VarDecode("BlockHandle")),
            Some(val) => {
                self.offset = val;
            }
        };
        match get_var_uint64(&data[offset..], &mut offset) {
            None => Err(Error::VarDecode("BlockHandle")),
            Some(val) => {
                self.size = val;
                Ok(offset)
            }
        }
    }
}
// Sentinel handle pointing at nothing (zero offset, zero size).
pub const NULL_BLOCK_HANDLE: BlockHandle = BlockHandle { offset: 0, size: 0 };

/// Owned value stored in an index block entry; currently just a data-block
/// handle.
#[derive(Default, Debug, PartialEq, Eq, Clone)]
pub struct IndexValue {
    pub handle: BlockHandle,
}

impl IndexValue {
    /// Decodes the embedded block handle from the front of `data`.
    pub fn decode_from(&mut self, data: &[u8]) -> Result<()> {
        self.handle.decode_from(data)?;
        Ok(())
    }
}
/// Borrowed view of an [`IndexValue`], avoiding a copy when only reading.
pub struct IndexValueRef<'a> {
    pub handle: &'a BlockHandle,
}

impl<'a> IndexValueRef<'a> {
    pub fn new(handle: &'a BlockHandle) -> Self {
        Self { handle }
    }

    /// Materializes an owned [`IndexValue`] from this borrowed view.
    pub fn to_owned(&self) -> IndexValue {
        IndexValue {
            // `BlockHandle` is `Copy`: dereference instead of calling
            // `.clone()` on a reference (clippy::clone_on_copy).
            handle: *self.handle,
        }
    }

    /// Serializes the value (currently just the handle) into `buff`.
    pub fn encode_to(&self, buff: &mut Vec<u8>) {
        self.handle.encode_to(buff);
        // TODO: support encode the first key
    }
}
impl IndexValue {
    /// Returns a borrowed view over this value's handle.
    pub fn as_ref(&self) -> IndexValueRef {
        IndexValueRef {
            handle: &self.handle,
        }
    }
}
/// Checksum algorithm identifier persisted in the table footer; the numeric
/// values are part of the on-disk format and must not change.
#[derive(Clone, Copy)]
pub enum ChecksumType {
    NoChecksum = 0x0,
    CRC32c = 0x1,
    // xxHash = 0x2,
    // xxHash64 = 0x3,
}
// On-disk magic numbers identifying the table format; the "legacy" pair
// belongs to the older footer layout.
pub const LEGACY_BLOCK_BASED_TABLE_MAGIC_NUMBER: u64 = 0xdb4775248b80fb57u64;
pub const LEGACY_PLAIN_TABLE_MAGIC_NUMBER: u64 = 0x4f3418eb7a8f13b8u64;
pub const BLOCK_BASED_TABLE_MAGIC_NUMBER: u64 = 0x88e241b785f4cff7u64;
pub const PLAIN_TABLE_MAGIC_NUMBER: u64 = 0x8242229663bf9564u64;
const MAGIC_NUMBER_LENGTH_BYTE: usize = 8;

/// Whether `magic_number` identifies a legacy (version-0) footer layout.
pub fn is_legacy_footer_format(magic_number: u64) -> bool {
    matches!(
        magic_number,
        LEGACY_BLOCK_BASED_TABLE_MAGIC_NUMBER | LEGACY_PLAIN_TABLE_MAGIC_NUMBER
    )
}
/// Fixed-size trailer at the end of a table file: locations of the
/// metaindex and index blocks, plus format version, checksum type, and the
/// identifying magic number.
#[derive(Default, Clone)]
pub struct Footer {
    pub version: u32,
    // Stored as the raw `ChecksumType` discriminant.
    pub checksum: u8,
    pub metaindex_handle: BlockHandle,
    pub index_handle: BlockHandle,
    pub table_magic_number: u64,
}

pub const BLOCK_HANDLE_MAX_ENCODED_LENGTH: usize = 20;
// Legacy layout: two padded handles + 8-byte magic = 48 bytes.
pub const VERSION0ENCODED_LENGTH: usize = 2 * BLOCK_HANDLE_MAX_ENCODED_LENGTH + 8;
// New layout: checksum byte + two padded handles + 4-byte version + 8-byte magic = 53 bytes.
pub const NEW_VERSIONS_ENCODED_LENGTH: usize = 1 + 2 * BLOCK_HANDLE_MAX_ENCODED_LENGTH + 4 + 8;
impl Footer {
    pub fn set_checksum(&mut self, ck: ChecksumType) {
        self.checksum = ck as u8;
    }

    /// Serializes the footer onto the end of `buf`, choosing the legacy or
    /// new layout based on the magic number. Handles are zero-padded to a
    /// fixed width so the footer has a constant total length.
    pub fn encode_to(&self, buf: &mut Vec<u8>) {
        if is_legacy_footer_format(self.table_magic_number) {
            let origin_size = buf.len();
            // The legacy layout has no checksum field; CRC32c is implied.
            assert_eq!(self.checksum, ChecksumType::CRC32c as u8);
            self.metaindex_handle.encode_to(buf);
            self.index_handle.encode_to(buf);
            buf.resize(origin_size + BLOCK_HANDLE_MAX_ENCODED_LENGTH * 2, 0);
            // Magic number is written as two little-endian u32 halves, low first.
            let v1 = (self.table_magic_number & 0xffffffffu64) as u32;
            let v2 = (self.table_magic_number >> 32) as u32;
            buf.extend_from_slice(&v1.to_le_bytes());
            buf.extend_from_slice(&v2.to_le_bytes());
            assert_eq!(buf.len(), origin_size + VERSION0ENCODED_LENGTH);
        } else {
            let origin_size = buf.len();
            buf.push(self.checksum);
            self.metaindex_handle.encode_to(buf);
            self.index_handle.encode_to(buf);
            // Pad up to the fixed layout, leaving 12 bytes for version + magic.
            buf.resize(origin_size + NEW_VERSIONS_ENCODED_LENGTH - 12, 0);
            buf.extend_from_slice(&self.version.to_le_bytes());
            let v1 = (self.table_magic_number & 0xffffffffu64) as u32;
            let v2 = (self.table_magic_number >> 32) as u32;
            buf.extend_from_slice(&v1.to_le_bytes());
            buf.extend_from_slice(&v2.to_le_bytes());
            assert_eq!(buf.len(), origin_size + NEW_VERSIONS_ENCODED_LENGTH);
        }
    }

    /// Parses a footer from `data`, which must end exactly at the end of the
    /// encoded footer (the magic number is read from the tail of `data`).
    pub fn decode_from(&mut self, data: &[u8]) -> Result<()> {
        let magic_offset = data.len() - MAGIC_NUMBER_LENGTH_BYTE;
        let magic_lo = decode_fixed_uint32(&data[magic_offset..]);
        let magic_hi = decode_fixed_uint32(&data[(magic_offset + 4)..]);
        let mut magic = ((magic_hi as u64) << 32) | (magic_lo as u64);
        let legacy = is_legacy_footer_format(magic);
        if legacy {
            // Normalize legacy magic numbers to their current equivalents.
            if magic == LEGACY_BLOCK_BASED_TABLE_MAGIC_NUMBER {
                magic = BLOCK_BASED_TABLE_MAGIC_NUMBER;
            } else if magic == LEGACY_PLAIN_TABLE_MAGIC_NUMBER {
                magic = PLAIN_TABLE_MAGIC_NUMBER;
            }
        }
        self.table_magic_number = magic;
        let mut offset = if legacy {
            // Legacy footers are implicitly version 0 with CRC32c.
            self.version = 0;
            self.checksum = CRC32c as u8;
            data.len() - VERSION0ENCODED_LENGTH
        } else {
            self.version = decode_fixed_uint32(&data[(magic_offset - 4)..]);
            let mut offset = data.len() - NEW_VERSIONS_ENCODED_LENGTH;
            // NOTE(review): assumes `get_var_uint32` advances `offset` past
            // the checksum byte within the sub-slice — confirm against `util`.
            match get_var_uint32(&data[offset..], &mut offset) {
                None => return Err(Error::VarDecode("BlockBasedTable Footer")),
                Some(val) => {
                    self.checksum = val as u8;
                }
            }
            offset
        };
        offset += self.metaindex_handle.decode_from(&data[offset..])?;
        self.index_handle.decode_from(&data[offset..])?;
        Ok(())
    }
}
| 33.821622 | 95 | 0.611955 |
1a6a6b84cee54f99c3858d8a0e50add19ec657cd | 9,229 | //! Implementation defined RPC server errors
use {
crate::rpc_response::RpcSimulateTransactionResult,
jsonrpc_core::{Error, ErrorCode},
solana_sdk::clock::Slot,
solana_transaction_status::EncodeError,
thiserror::Error,
};
// Implementation-defined JSON-RPC server error codes. JSON-RPC 2.0 reserves
// the range -32000..=-32099 for such codes; each constant below corresponds
// to one `RpcCustomError` variant (see `impl From<RpcCustomError> for Error`).
pub const JSON_RPC_SERVER_ERROR_BLOCK_CLEANED_UP: i64 = -32001;
pub const JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE: i64 = -32002;
pub const JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE: i64 = -32003;
pub const JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE: i64 = -32004;
pub const JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY: i64 = -32005;
pub const JSON_RPC_SERVER_ERROR_TRANSACTION_PRECOMPILE_VERIFICATION_FAILURE: i64 = -32006;
pub const JSON_RPC_SERVER_ERROR_SLOT_SKIPPED: i64 = -32007;
pub const JSON_RPC_SERVER_ERROR_NO_SNAPSHOT: i64 = -32008;
pub const JSON_RPC_SERVER_ERROR_LONG_TERM_STORAGE_SLOT_SKIPPED: i64 = -32009;
pub const JSON_RPC_SERVER_ERROR_KEY_EXCLUDED_FROM_SECONDARY_INDEX: i64 = -32010;
pub const JSON_RPC_SERVER_ERROR_TRANSACTION_HISTORY_NOT_AVAILABLE: i64 = -32011;
pub const JSON_RPC_SCAN_ERROR: i64 = -32012;
pub const JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_LEN_MISMATCH: i64 = -32013;
pub const JSON_RPC_SERVER_ERROR_BLOCK_STATUS_NOT_AVAILABLE_YET: i64 = -32014;
pub const JSON_RPC_SERVER_ERROR_UNSUPPORTED_TRANSACTION_VERSION: i64 = -32015;
pub const JSON_RPC_SERVER_ERROR_MIN_CONTEXT_SLOT_NOT_REACHED: i64 = -32016;
/// Implementation-defined errors raised by the RPC server.
///
/// Each variant is translated into a `jsonrpc_core::Error` carrying the
/// matching `JSON_RPC_*` server error code (and, for some variants, a
/// structured JSON `data` payload) by `impl From<RpcCustomError> for Error`.
#[derive(Error, Debug)]
pub enum RpcCustomError {
    #[error("BlockCleanedUp")]
    BlockCleanedUp {
        slot: Slot,
        first_available_block: Slot,
    },
    #[error("SendTransactionPreflightFailure")]
    SendTransactionPreflightFailure {
        message: String,
        result: RpcSimulateTransactionResult,
    },
    #[error("TransactionSignatureVerificationFailure")]
    TransactionSignatureVerificationFailure,
    #[error("BlockNotAvailable")]
    BlockNotAvailable { slot: Slot },
    #[error("NodeUnhealthy")]
    NodeUnhealthy { num_slots_behind: Option<Slot> },
    #[error("TransactionPrecompileVerificationFailure")]
    TransactionPrecompileVerificationFailure(solana_sdk::transaction::TransactionError),
    #[error("SlotSkipped")]
    SlotSkipped { slot: Slot },
    #[error("NoSnapshot")]
    NoSnapshot,
    #[error("LongTermStorageSlotSkipped")]
    LongTermStorageSlotSkipped { slot: Slot },
    #[error("KeyExcludedFromSecondaryIndex")]
    KeyExcludedFromSecondaryIndex { index_key: String },
    #[error("TransactionHistoryNotAvailable")]
    TransactionHistoryNotAvailable,
    #[error("ScanError")]
    ScanError { message: String },
    #[error("TransactionSignatureLenMismatch")]
    TransactionSignatureLenMismatch,
    #[error("BlockStatusNotAvailableYet")]
    BlockStatusNotAvailableYet { slot: Slot },
    #[error("UnsupportedTransactionVersion")]
    UnsupportedTransactionVersion(u8),
    #[error("MinContextSlotNotReached")]
    MinContextSlotNotReached { context_slot: Slot },
}
/// Structured `data` payload attached to `NodeUnhealthy` RPC errors.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct NodeUnhealthyErrorData {
    pub num_slots_behind: Option<Slot>,
}
/// Structured `data` payload attached to `MinContextSlotNotReached` errors.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct MinContextSlotNotReachedErrorData {
    pub context_slot: Slot,
}
/// Transaction-encoding failures map onto the dedicated
/// `UnsupportedTransactionVersion` RPC error.
impl From<EncodeError> for RpcCustomError {
    fn from(err: EncodeError) -> Self {
        match err {
            EncodeError::UnsupportedTransactionVersion(version) => {
                Self::UnsupportedTransactionVersion(version)
            }
        }
    }
}
impl From<RpcCustomError> for Error {
fn from(e: RpcCustomError) -> Self {
match e {
RpcCustomError::BlockCleanedUp {
slot,
first_available_block,
} => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_BLOCK_CLEANED_UP),
message: format!(
"Block {} cleaned up, does not exist on node. First available block: {}",
slot, first_available_block,
),
data: None,
},
RpcCustomError::SendTransactionPreflightFailure { message, result } => Self {
code: ErrorCode::ServerError(
JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE,
),
message,
data: Some(serde_json::json!(result)),
},
RpcCustomError::TransactionSignatureVerificationFailure => Self {
code: ErrorCode::ServerError(
JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE,
),
message: "Transaction signature verification failure".to_string(),
data: None,
},
RpcCustomError::BlockNotAvailable { slot } => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE),
message: format!("Block not available for slot {}", slot),
data: None,
},
RpcCustomError::NodeUnhealthy { num_slots_behind } => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY),
message: if let Some(num_slots_behind) = num_slots_behind {
format!("Node is behind by {} slots", num_slots_behind)
} else {
"Node is unhealthy".to_string()
},
data: Some(serde_json::json!(NodeUnhealthyErrorData {
num_slots_behind
})),
},
RpcCustomError::TransactionPrecompileVerificationFailure(e) => Self {
code: ErrorCode::ServerError(
JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE,
),
message: format!("Transaction precompile verification failure {:?}", e),
data: None,
},
RpcCustomError::SlotSkipped { slot } => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_SLOT_SKIPPED),
message: format!(
"Slot {} was skipped, or missing due to ledger jump to recent snapshot",
slot
),
data: None,
},
RpcCustomError::NoSnapshot => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_NO_SNAPSHOT),
message: "No snapshot".to_string(),
data: None,
},
RpcCustomError::LongTermStorageSlotSkipped { slot } => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_LONG_TERM_STORAGE_SLOT_SKIPPED),
message: format!("Slot {} was skipped, or missing in long-term storage", slot),
data: None,
},
RpcCustomError::KeyExcludedFromSecondaryIndex { index_key } => Self {
code: ErrorCode::ServerError(
JSON_RPC_SERVER_ERROR_KEY_EXCLUDED_FROM_SECONDARY_INDEX,
),
message: format!(
"{} excluded from account secondary indexes; \
this RPC method unavailable for key",
index_key
),
data: None,
},
RpcCustomError::TransactionHistoryNotAvailable => Self {
code: ErrorCode::ServerError(
JSON_RPC_SERVER_ERROR_TRANSACTION_HISTORY_NOT_AVAILABLE,
),
message: "Transaction history is not available from this node".to_string(),
data: None,
},
RpcCustomError::ScanError { message } => Self {
code: ErrorCode::ServerError(JSON_RPC_SCAN_ERROR),
message,
data: None,
},
RpcCustomError::TransactionSignatureLenMismatch => Self {
code: ErrorCode::ServerError(
JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_LEN_MISMATCH,
),
message: "Transaction signature length mismatch".to_string(),
data: None,
},
RpcCustomError::BlockStatusNotAvailableYet { slot } => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_BLOCK_STATUS_NOT_AVAILABLE_YET),
message: format!("Block status not yet available for slot {}", slot),
data: None,
},
RpcCustomError::UnsupportedTransactionVersion(version) => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_UNSUPPORTED_TRANSACTION_VERSION),
message: format!("Transaction version ({}) is not supported", version),
data: None,
},
RpcCustomError::MinContextSlotNotReached { context_slot } => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_MIN_CONTEXT_SLOT_NOT_REACHED),
message: "Minimum context slot has not been reached".to_string(),
data: Some(serde_json::json!(MinContextSlotNotReachedErrorData {
context_slot,
})),
},
}
}
}
| 43.947619 | 100 | 0.626828 |
e5a286d96b8ec2c5af6e5fe8aa41a9c6e17f0bc7 | 27,783 | use dfn_candid::{candid, candid_one};
use dfn_protobuf::protobuf;
use ed25519_dalek::Keypair;
use ic_canister_client::Sender;
use ic_nns_common::pb::v1::NeuronId;
use ic_nns_constants::GOVERNANCE_CANISTER_ID;
use ic_nns_governance::pb::v1::{GovernanceError, Neuron, NeuronInfo};
use ic_nns_gtc::der_encode;
use ic_nns_gtc::pb::v1::AccountState;
use ic_nns_gtc::test_constants::{
TestIdentity, TEST_IDENTITY_1, TEST_IDENTITY_2, TEST_IDENTITY_3, TEST_IDENTITY_4,
};
use ic_nns_test_keys::{
TEST_NEURON_1_OWNER_KEYPAIR, TEST_NEURON_1_OWNER_PRINCIPAL, TEST_NEURON_2_OWNER_KEYPAIR,
TEST_NEURON_2_OWNER_PRINCIPAL,
};
use ic_nns_test_utils::itest_helpers::{
local_test_on_nns_subnet, NnsCanisters, NnsInitPayloadsBuilder,
};
use ledger_canister::{
AccountBalanceArgs, AccountIdentifier, Subaccount, Tokens, DEFAULT_TRANSFER_FEE,
};
use std::collections::HashSet;
use std::convert::TryFrom;
use std::sync::Arc;
use std::time::SystemTime;
/// Seed Round (SR) neurons are released over 48 months in the following tests
pub const SR_MONTHS_TO_RELEASE: u8 = 48;
/// Early Contributor Tokenholder (ECT) neurons are released over 12 months in
/// the following tests
pub const ECT_MONTHS_TO_RELEASE: u8 = 12;
// Fixture SR accounts: (GTC address, account balance) pairs. The balance is
// read back as `AccountState::icpts` by the assertions below.
const TEST_SR_ACCOUNTS: &[(&str, u32); 2] = &[
    (TEST_IDENTITY_1.gtc_address, 1200),
    (TEST_IDENTITY_3.gtc_address, 14500),
];
// Fixture ECT accounts, same shape as `TEST_SR_ACCOUNTS`.
const TEST_ECT_ACCOUNTS: &[(&str, u32); 2] = &[
    (TEST_IDENTITY_2.gtc_address, 8544),
    (TEST_IDENTITY_4.gtc_address, 3789),
];
/// Test the GTC's `claim_neurons` method (and associated methods
/// `account_has_claimed_neurons` and `permanently_lock_account`).
///
/// End-to-end flow: set up the NNS with test GTC accounts, verify that only
/// account owners can claim/donate, donate identity 3's account, claim
/// identity 1's (SR) account, forward the whitelisted unclaimed account,
/// then claim identity 2's (ECT) account.
#[test]
pub fn test_claim_neurons() {
    local_test_on_nns_subnet(|runtime| async move {
        let mut nns_init_payload_builder = NnsInitPayloadsBuilder::new();
        add_test_gtc_neurons(&mut nns_init_payload_builder);
        // Wire up the recipient neurons for `donate_account` and
        // `forward_whitelisted_unclaimed_accounts` before building.
        let donate_account_recipient_neuron_id =
            get_donate_account_recipient_neuron_id(&nns_init_payload_builder);
        nns_init_payload_builder
            .genesis_token
            .donate_account_recipient_neuron_id = Some(donate_account_recipient_neuron_id.clone());
        let forward_all_unclaimed_accounts_recipient_neuron_id =
            get_forward_whitelisted_unclaimed_accounts_recipient_neuron_id(
                &nns_init_payload_builder,
            );
        nns_init_payload_builder
            .genesis_token
            .forward_whitelisted_unclaimed_accounts_recipient_neuron_id =
            Some(forward_all_unclaimed_accounts_recipient_neuron_id.clone());
        let nns_init_payload = nns_init_payload_builder.build();
        // Sanity-check the release schedules: one neuron per release month.
        let identity_1_neuron_ids = nns_init_payload
            .genesis_token
            .accounts
            .get(TEST_IDENTITY_1.gtc_address)
            .unwrap()
            .neuron_ids
            .clone();
        assert_eq!(identity_1_neuron_ids.len(), SR_MONTHS_TO_RELEASE as usize);
        let identity_2_neuron_ids = nns_init_payload
            .genesis_token
            .accounts
            .get(TEST_IDENTITY_2.gtc_address)
            .unwrap()
            .neuron_ids
            .clone();
        assert_eq!(identity_2_neuron_ids.len(), ECT_MONTHS_TO_RELEASE as usize);
        let nns_canisters = NnsCanisters::set_up(&runtime, nns_init_payload).await;
        assert_neurons_can_only_be_claimed_by_account_owner(&nns_canisters).await;
        assert_neurons_can_only_be_donated_by_account_owner(&nns_canisters).await;
        assert_neurons_can_be_donated(
            &nns_canisters,
            donate_account_recipient_neuron_id,
            &*TEST_NEURON_1_OWNER_KEYPAIR,
            &TEST_IDENTITY_3,
        )
        .await;
        // Assert that a Seed Round (SR) investor can claim their tokens
        assert_neurons_can_be_claimed(&nns_canisters, identity_1_neuron_ids, &TEST_IDENTITY_1)
            .await;
        // Try to forward the whitelisted account. Note that this should only forward
        // the whitelisted account so a non-whitelisted account should still be
        // able to claim afterwards.
        assert_unclaimed_neurons_can_be_forwarded(
            &nns_canisters,
            forward_all_unclaimed_accounts_recipient_neuron_id,
            &*TEST_NEURON_2_OWNER_KEYPAIR,
        )
        .await;
        // Assert that an Early Contributor Tokenholder (ECT) investor can claim their
        // tokens
        assert_neurons_can_be_claimed(&nns_canisters, identity_2_neuron_ids, &TEST_IDENTITY_2)
            .await;
        Ok(())
    })
}
/// At Genesis, calls to `claim_neurons` and `forward_all_unclaimed_accounts`
/// should fail, as they both depend on a certain amount of time passing before
/// they are able to be called.
#[test]
pub fn test_gtc_at_genesis() {
    local_test_on_nns_subnet(|runtime| async move {
        let mut nns_init_payload_builder = NnsInitPayloadsBuilder::new();
        add_test_gtc_neurons(&mut nns_init_payload_builder);
        // Set the Genesis Moratorium to start now.
        //
        // Fixed: this previously used `SystemTime::now().elapsed()`, which
        // measures the (near-zero) time since the `now()` call itself, so
        // genesis was effectively set to ~1970 instead of "now" as intended.
        // Seconds-since-UNIX-epoch is what "now" means here.
        nns_init_payload_builder
            .genesis_token
            .genesis_timestamp_seconds = SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_secs();
        let nns_init_payload = nns_init_payload_builder.build();
        let nns_canisters = NnsCanisters::set_up(&runtime, nns_init_payload).await;
        let gtc = nns_canisters.genesis_token;
        let sign_cmd = move |msg: &[u8]| Ok(TEST_IDENTITY_1.sign(msg));
        let sender = Sender::ExternalHsm {
            pub_key: der_encode(&TEST_IDENTITY_1.public_key()),
            sign: Arc::new(sign_cmd),
        };
        // Assert `claim_neurons` fails during the moratorium
        let claim_neurons_response: Result<Result<Vec<NeuronId>, String>, String> = gtc
            .update_from_sender(
                "claim_neurons",
                candid,
                (TEST_IDENTITY_1.public_key_hex,),
                &sender,
            )
            .await;
        assert!(claim_neurons_response.unwrap().is_err());
        // Assert that `TEST_IDENTITY_1` did not claim their neurons
        let account_has_claimed_neurons_response: Result<Result<AccountState, String>, String> =
            gtc.update_from_sender(
                "get_account",
                candid_one,
                TEST_IDENTITY_1.gtc_address.to_string(),
                &sender,
            )
            .await;
        assert!(
            !account_has_claimed_neurons_response
                .unwrap()
                .unwrap()
                .has_claimed
        );
        // Assert that `forward_all_unclaimed_accounts` fails
        let forward_all_unclaimed_accounts_response: Result<Result<(), String>, String> = gtc
            .update_from_sender(
                "forward_whitelisted_unclaimed_accounts",
                candid_one,
                (),
                &sender,
            )
            .await;
        assert!(forward_all_unclaimed_accounts_response.unwrap().is_err());
        Ok(())
    })
}
/// Verify that a GTC account's neurons cannot be claimed by anyone but the
/// account owner: identity 3 signs a `claim_neurons` call naming identity
/// 1's public key, and the GTC must reject it.
async fn assert_neurons_can_only_be_claimed_by_account_owner(nns_canisters: &NnsCanisters<'_>) {
    let genesis_token = &nns_canisters.genesis_token;
    // Requests are signed by identity 3, who does not own identity 1's account.
    let identity_3_sender = Sender::ExternalHsm {
        pub_key: der_encode(&TEST_IDENTITY_3.public_key()),
        sign: Arc::new(move |msg: &[u8]| Ok(TEST_IDENTITY_3.sign(msg))),
    };
    let response: Result<Result<Vec<NeuronId>, String>, String> = genesis_token
        .update_from_sender(
            "claim_neurons",
            candid,
            (TEST_IDENTITY_1.public_key_hex,),
            &identity_3_sender,
        )
        .await;
    // The claim must fail because the caller is not the account owner.
    assert!(response.unwrap().is_err());
}
/// Verify that a GTC account cannot be donated by anyone but its owner:
/// identity 3 signs a `donate_account` call naming identity 1's public key,
/// and the GTC must reject it.
async fn assert_neurons_can_only_be_donated_by_account_owner(nns_canisters: &NnsCanisters<'_>) {
    let genesis_token = &nns_canisters.genesis_token;
    // Requests are signed by identity 3, who does not own identity 1's account.
    let identity_3_sender = Sender::ExternalHsm {
        pub_key: der_encode(&TEST_IDENTITY_3.public_key()),
        sign: Arc::new(move |msg: &[u8]| Ok(TEST_IDENTITY_3.sign(msg))),
    };
    let response: Result<Result<(), String>, String> = genesis_token
        .update_from_sender(
            "donate_account",
            candid,
            (TEST_IDENTITY_1.public_key_hex,),
            &identity_3_sender,
        )
        .await;
    // The donation must fail because the caller is not the account owner.
    assert!(response.unwrap().is_err());
}
/// Assert that any user can forward an unclaimed GTC account.
///
/// This assumes the window after Genesis, during which the forwarding of
/// unclaimed accounts is forbidden, has expired.
///
/// Identity 1 (an unrelated caller) triggers the forward; only the
/// whitelisted account (`TEST_IDENTITY_4`) should be forwarded, with its
/// balance — minus one transfer fee per neuron — credited to the custodian
/// neuron identified by `custodian_neuron_id` / `custodian_key_pair`.
async fn assert_unclaimed_neurons_can_be_forwarded(
    nns_canisters: &NnsCanisters<'_>,
    custodian_neuron_id: NeuronId,
    custodian_key_pair: &Keypair,
) {
    let gtc = &nns_canisters.genesis_token;
    let governance = &nns_canisters.governance;
    let ledger = &nns_canisters.ledger;
    let sign_cmd = move |msg: &[u8]| Ok(TEST_IDENTITY_1.sign(msg));
    let sender = Sender::ExternalHsm {
        pub_key: der_encode(&TEST_IDENTITY_1.public_key()),
        sign: Arc::new(sign_cmd),
    };
    // Assert that `TEST_IDENTITY_4` has not yet claimed or donated their neurons
    let get_account_response: Result<Result<AccountState, String>, String> = gtc
        .update_from_sender(
            "get_account",
            candid_one,
            TEST_IDENTITY_4.gtc_address.to_string(),
            &sender,
        )
        .await;
    let account_before_forward = get_account_response.unwrap().unwrap();
    assert!(!account_before_forward.has_claimed);
    assert!(!account_before_forward.has_donated);
    assert!(!account_before_forward.has_forwarded);
    // Calculate how much ICPT is expected to be forwarded to the custodian
    // neuron: the full account balance minus one ledger transfer fee per
    // forwarded neuron.
    let expected_custodian_account_balance_increase: Tokens = Tokens::from_e8s(
        Tokens::from_tokens(account_before_forward.icpts as u64)
            .unwrap()
            .get_e8s()
            - (DEFAULT_TRANSFER_FEE.get_e8s() * account_before_forward.neuron_ids.len() as u64),
    );
    // Get the custodian neuron and its ledger account, so that we can later
    // assert that the account value has increased (as the result of
    // forwarding).
    let get_full_neuron_response: Result<Result<Neuron, GovernanceError>, String> = governance
        .update_from_sender(
            "get_full_neuron",
            candid_one,
            custodian_neuron_id.id,
            &Sender::from_keypair(custodian_key_pair),
        )
        .await;
    let custodian_neuron = get_full_neuron_response.unwrap().unwrap();
    let custodian_subaccount = Subaccount::try_from(&custodian_neuron.account[..]).unwrap();
    let custodian_account =
        AccountIdentifier::new(GOVERNANCE_CANISTER_ID.get(), Some(custodian_subaccount));
    let account_balance_response: Result<Tokens, String> = ledger
        .query_from_sender(
            "account_balance_pb",
            protobuf,
            AccountBalanceArgs {
                account: custodian_account,
            },
            &Sender::from_keypair(custodian_key_pair),
        )
        .await;
    let custodian_account_balance = account_balance_response.unwrap();
    let expected_custodian_account_balance_after_forward =
        (custodian_account_balance + expected_custodian_account_balance_increase).unwrap();
    // Have `TEST_IDENTITY_1` forward `TEST_IDENTITY_2`'s and `TEST_IDENTITY_4`'s
    // neurons
    let forward_whitelisted_unclaimed_accounts_response: Result<Result<(), String>, String> = gtc
        .update_from_sender(
            "forward_whitelisted_unclaimed_accounts",
            candid_one,
            (),
            &sender,
        )
        .await;
    assert!(forward_whitelisted_unclaimed_accounts_response
        .unwrap()
        .is_ok());
    // Assert that the forward updated the account state as expected
    let get_account_response: Result<Result<AccountState, String>, String> = gtc
        .update_from_sender(
            "get_account",
            candid_one,
            TEST_IDENTITY_4.gtc_address.to_string(),
            &sender,
        )
        .await;
    let account_after_forward = get_account_response.unwrap().unwrap();
    assert!(!account_after_forward.has_claimed);
    assert!(!account_after_forward.has_donated);
    assert!(account_after_forward.has_forwarded);
    assert_eq!(account_after_forward.authenticated_principal_id, None);
    assert_eq!(
        account_after_forward.successfully_transferred_neurons.len(),
        account_before_forward.neuron_ids.len(),
    );
    // But has not forwarded not whitelisted accounts.
    // Assert that the custodian neuron's ledger account has received the
    // forwarded funds
    let account_balance_response: Result<Tokens, String> = ledger
        .query_from_sender(
            "account_balance_pb",
            protobuf,
            AccountBalanceArgs {
                account: custodian_account,
            },
            &Sender::from_keypair(custodian_key_pair),
        )
        .await;
    let actual_custodian_account_balance_after_forward = account_balance_response.unwrap();
    assert_eq!(
        expected_custodian_account_balance_after_forward,
        actual_custodian_account_balance_after_forward
    );
    // Assert that the custodian neuron's stake matches its ledger account
    // balance
    let get_full_neuron_response: Result<Result<Neuron, GovernanceError>, String> = governance
        .update_from_sender(
            "get_full_neuron",
            candid_one,
            custodian_neuron_id.id,
            &Sender::from_keypair(custodian_key_pair),
        )
        .await;
    let custodian_neuron = get_full_neuron_response.unwrap().unwrap();
    let custodian_neuron_stake = Tokens::from_e8s(custodian_neuron.cached_neuron_stake_e8s);
    assert_eq!(
        custodian_neuron_stake,
        actual_custodian_account_balance_after_forward
    );
}
/// Assert that GTC neurons can be donated by the owner of the GTC account.
///
/// `test_identity` donates its whole account; the account balance — minus
/// one ledger transfer fee per donated neuron — must end up in the custodian
/// neuron identified by `custodian_neuron_id` / `custodian_key_pair`. Also
/// checks that donated accounts can be neither claimed nor donated again.
async fn assert_neurons_can_be_donated(
    nns_canisters: &NnsCanisters<'_>,
    custodian_neuron_id: NeuronId,
    custodian_key_pair: &'static Keypair,
    test_identity: &'static TestIdentity,
) {
    let gtc = &nns_canisters.genesis_token;
    let governance = &nns_canisters.governance;
    let ledger = &nns_canisters.ledger;
    let sign_cmd = move |msg: &[u8]| Ok(test_identity.sign(msg));
    let sender = Sender::ExternalHsm {
        pub_key: der_encode(&test_identity.public_key()),
        sign: Arc::new(sign_cmd),
    };
    // Assert that `test_identity` has not yet claimed or donated their neurons
    let get_account_response: Result<Result<AccountState, String>, String> = gtc
        .update_from_sender(
            "get_account",
            candid_one,
            test_identity.gtc_address.to_string(),
            &sender,
        )
        .await;
    let account_before_donation = get_account_response.unwrap().unwrap();
    assert!(!account_before_donation.has_claimed);
    assert!(!account_before_donation.has_donated);
    assert!(!account_before_donation.has_forwarded);
    // Calculate how much ICPT is expected to be donated to the custodian
    // neuron: the full account balance minus one transfer fee per neuron.
    let expected_custodian_account_balance_increase: Tokens = Tokens::from_e8s(
        Tokens::from_tokens(account_before_donation.icpts as u64)
            .unwrap()
            .get_e8s()
            - (DEFAULT_TRANSFER_FEE.get_e8s() * account_before_donation.neuron_ids.len() as u64),
    );
    // Get the custodian neuron and its ledger account, so that we can later
    // assert that the account value has increased (as the result of a
    // donation).
    let get_full_neuron_response: Result<Result<Neuron, GovernanceError>, String> = governance
        .update_from_sender(
            "get_full_neuron",
            candid_one,
            custodian_neuron_id.id,
            &Sender::from_keypair(custodian_key_pair),
        )
        .await;
    let custodian_neuron = get_full_neuron_response.unwrap().unwrap();
    let custodian_subaccount = Subaccount::try_from(&custodian_neuron.account[..]).unwrap();
    let custodian_account =
        AccountIdentifier::new(GOVERNANCE_CANISTER_ID.get(), Some(custodian_subaccount));
    let account_balance_response: Result<Tokens, String> = ledger
        .query_from_sender(
            "account_balance_pb",
            protobuf,
            AccountBalanceArgs {
                account: custodian_account,
            },
            &Sender::from_keypair(custodian_key_pair),
        )
        .await;
    let custodian_account_balance = account_balance_response.unwrap();
    let expected_custodian_account_balance_after_donation =
        (custodian_account_balance + expected_custodian_account_balance_increase).unwrap();
    // Have `test_identity` donate their neurons
    let donate_account_response: Result<Result<(), String>, String> = gtc
        .update_from_sender(
            "donate_account",
            candid_one,
            test_identity.public_key_hex.to_string(),
            &sender,
        )
        .await;
    assert!(donate_account_response.unwrap().is_ok());
    // Assert that `test_identity` has donated their neurons
    let get_account_response: Result<Result<AccountState, String>, String> = gtc
        .update_from_sender(
            "get_account",
            candid_one,
            test_identity.gtc_address.to_string(),
            &sender,
        )
        .await;
    let account_after_donation = get_account_response.unwrap().unwrap();
    assert!(account_after_donation.has_donated);
    assert_eq!(
        account_after_donation.authenticated_principal_id,
        Some(test_identity.principal_id())
    );
    assert_eq!(
        account_after_donation
            .successfully_transferred_neurons
            .len(),
        account_before_donation.neuron_ids.len(),
    );
    // Assert that donated neurons can't be claimed
    let claim_neurons_response: Result<Result<Vec<NeuronId>, String>, String> = gtc
        .update_from_sender(
            "claim_neurons",
            candid,
            (test_identity.public_key_hex,),
            &sender,
        )
        .await;
    assert!(claim_neurons_response.unwrap().is_err());
    // Assert calling donate a second time fails
    let donate_account_response: Result<Result<(), String>, String> = gtc
        .update_from_sender(
            "donate_account",
            candid_one,
            test_identity.public_key_hex.to_string(),
            &sender,
        )
        .await;
    assert!(donate_account_response.unwrap().is_err());
    // Assert that the custodian neuron's ledger account has received the
    // donated funds
    let account_balance_response: Result<Tokens, String> = ledger
        .query_from_sender(
            "account_balance_pb",
            protobuf,
            AccountBalanceArgs {
                account: custodian_account,
            },
            &Sender::from_keypair(custodian_key_pair),
        )
        .await;
    let actual_custodian_account_balance_after_donation = account_balance_response.unwrap();
    assert_eq!(
        expected_custodian_account_balance_after_donation,
        actual_custodian_account_balance_after_donation
    );
    // Assert that the custodian neuron's stake matches its ledger account
    // balance
    let get_full_neuron_response: Result<Result<Neuron, GovernanceError>, String> = governance
        .update_from_sender(
            "get_full_neuron",
            candid_one,
            custodian_neuron_id.id,
            &Sender::from_keypair(custodian_key_pair),
        )
        .await;
    let custodian_neuron = get_full_neuron_response.unwrap().unwrap();
    let custodian_neuron_stake = Tokens::from_e8s(custodian_neuron.cached_neuron_stake_e8s);
    assert_eq!(
        custodian_neuron_stake,
        actual_custodian_account_balance_after_donation
    );
}
/// Test that the given `test_identity` can claim their neurons, expected to
/// be `expected_neuron_ids`.
///
/// Checks, in order: the account starts unclaimed and controls no neurons;
/// claiming transfers control of exactly `expected_neuron_ids` to the
/// identity; claiming is idempotent; the identity's principal is recorded on
/// the account; and claimed neurons are pre-aged.
async fn assert_neurons_can_be_claimed(
    nns_canisters: &NnsCanisters<'_>,
    expected_neuron_ids: Vec<NeuronId>,
    test_identity: &'static TestIdentity,
) {
    let gtc = &nns_canisters.genesis_token;
    let governance = &nns_canisters.governance;
    let sign_cmd = move |msg: &[u8]| Ok(test_identity.sign(msg));
    let sender = Sender::ExternalHsm {
        pub_key: der_encode(&test_identity.public_key()),
        sign: Arc::new(sign_cmd),
    };
    // Assert that `test_identity` has not yet claimed their neurons
    let get_account_response: Result<Result<AccountState, String>, String> = gtc
        .update_from_sender(
            "get_account",
            candid_one,
            test_identity.gtc_address.to_string(),
            &sender,
        )
        .await;
    assert!(!get_account_response.unwrap().unwrap().has_claimed);
    // Assert that `test_identity` does not control any neurons in the Governance
    // canister
    let get_neuron_ids_response: Result<Vec<u64>, String> = governance
        .update_from_sender("get_neuron_ids", candid, (), &sender)
        .await;
    assert!(get_neuron_ids_response.unwrap().is_empty());
    // Given a sample neuron ID from `expected_neuron_ids`, assert that we can
    // can get this neuron's info via the `get_neuron_info` Governance method,
    // but `get_full_neuron` returns an error (as `test_identity` does not
    // controll the neuron yet)
    let sample_neuron_id = expected_neuron_ids.get(0).unwrap().id;
    let get_neuron_info_response: Result<Result<NeuronInfo, GovernanceError>, String> = governance
        .update_from_sender("get_neuron_info", candid_one, sample_neuron_id, &sender)
        .await;
    assert!(get_neuron_info_response.unwrap().is_ok());
    let get_full_neuron_response: Result<Result<Neuron, GovernanceError>, String> = governance
        .update_from_sender("get_full_neuron", candid_one, sample_neuron_id, &sender)
        .await;
    assert!(get_full_neuron_response.unwrap().is_err());
    // Call the GTC to claim neurons for `test_identity`
    let gtc_response: Result<Result<Vec<NeuronId>, String>, String> = gtc
        .update_from_sender(
            "claim_neurons",
            candid,
            (test_identity.public_key_hex,),
            &sender,
        )
        .await;
    let returned_neuron_ids = gtc_response.unwrap().unwrap();
    let get_neuron_ids_response: Result<Vec<u64>, String> = governance
        .update_from_sender("get_neuron_ids", candid, (), &sender)
        .await;
    let controlled_neuron_ids: Vec<NeuronId> = get_neuron_ids_response
        .unwrap()
        .into_iter()
        .map(|id| NeuronId { id })
        .collect();
    // Assert that the neuron IDs:
    //   * returned by the GTC's `claim_neurons` method
    //   * returned by the Governance's `get_neuron_ids` method
    //   * given by `expected_neuron_ids`
    // all contain the exact same set of neuron IDs
    let returned_neuron_ids_set: HashSet<NeuronId> = returned_neuron_ids.iter().cloned().collect();
    let expected_neuron_ids_set: HashSet<NeuronId> = expected_neuron_ids.iter().cloned().collect();
    let controlled_neuron_ids_set: HashSet<NeuronId> =
        controlled_neuron_ids.iter().cloned().collect();
    assert_eq!(returned_neuron_ids_set, expected_neuron_ids_set);
    assert_eq!(controlled_neuron_ids_set, expected_neuron_ids_set);
    // Assert that `test_identity` has now claimed their neurons
    let get_account_response: Result<Result<AccountState, String>, String> = gtc
        .update_from_sender(
            "get_account",
            candid_one,
            test_identity.gtc_address.to_string(),
            &sender,
        )
        .await;
    assert!(get_account_response.unwrap().unwrap().has_claimed);
    // Assert that calling `get_full_neuron` with `sample_neuron_id` now
    // returns successfully, as `test_identity` now controls this neuron
    let governance_response: Result<Result<Neuron, GovernanceError>, String> = governance
        .update_from_sender("get_full_neuron", candid_one, sample_neuron_id, &sender)
        .await;
    let neuron = governance_response.unwrap().unwrap();
    assert_eq!(neuron.controller, Some(test_identity.principal_id()));
    // Assert that calling `claim_neurons` a second time returns the same set
    // of neuron IDs
    let gtc_response_2: Result<Result<Vec<NeuronId>, String>, String> = gtc
        .update_from_sender(
            "claim_neurons",
            candid,
            (test_identity.public_key_hex,),
            &sender,
        )
        .await;
    let returned_neuron_ids_2 = gtc_response_2.unwrap().unwrap();
    let returned_neuron_ids_2_set: HashSet<NeuronId> =
        returned_neuron_ids_2.iter().cloned().collect();
    assert_eq!(returned_neuron_ids_2_set, expected_neuron_ids_set);
    // Assert that `test_identity`'s principal has been set in their GTC account
    let get_account_response: Result<Result<AccountState, String>, String> = gtc
        .update_from_sender(
            "get_account",
            candid_one,
            test_identity.gtc_address.to_string(),
            &sender,
        )
        .await;
    assert_eq!(
        get_account_response
            .unwrap()
            .unwrap()
            .authenticated_principal_id,
        Some(test_identity.principal_id())
    );
    // Assert that a claimed neuron is pre-aged: at least 18 x 30 days old.
    let get_neuron_info_response: Result<Result<NeuronInfo, GovernanceError>, String> = governance
        .update_from_sender("get_neuron_info", candid_one, sample_neuron_id, &sender)
        .await;
    let neuron_info = get_neuron_info_response.unwrap().unwrap();
    assert!(neuron_info.age_seconds >= 86400 * 18 * 30);
}
/// Populate `payload_builder` with the GTC/Governance fixture used by these
/// tests: SR and ECT accounts, their release schedules, the forwarding
/// whitelist, and the standard test neurons.
pub fn add_test_gtc_neurons(payload_builder: &mut NnsInitPayloadsBuilder) {
    // Genesis timestamp of 1 second after the epoch — presumably far enough
    // in the past that moratoria don't block the happy-path tests; confirm
    // against the GTC canister's time checks.
    payload_builder.genesis_token.genesis_timestamp_seconds = 1;
    payload_builder.genesis_token.sr_months_to_release = Some(SR_MONTHS_TO_RELEASE);
    payload_builder.genesis_token.ect_months_to_release = Some(ECT_MONTHS_TO_RELEASE);
    payload_builder
        .genesis_token
        .add_sr_neurons(TEST_SR_ACCOUNTS);
    payload_builder
        .genesis_token
        .add_ect_neurons(TEST_ECT_ACCOUNTS);
    // Mirror the GTC neurons into the Governance payload — must run after
    // the SR/ECT accounts have been added above.
    payload_builder
        .governance
        .add_gtc_neurons(payload_builder.genesis_token.get_gtc_neurons());
    // Only `TEST_IDENTITY_4` is eligible for forwarding of unclaimed accounts.
    payload_builder
        .genesis_token
        .add_forward_whitelist(&[TEST_IDENTITY_4.gtc_address]);
    payload_builder.governance.with_test_neurons();
}
/// Return the neuron ID of the neuron that the GTC method `donate_account`
/// should donate to: the neuron in the Governance init payload controlled
/// by test neuron 1's owner.
fn get_donate_account_recipient_neuron_id(payload_builder: &NnsInitPayloadsBuilder) -> NeuronId {
    let neurons = &payload_builder.governance.proto.neurons;
    let (neuron_id, _) = neurons
        .iter()
        .find(|(_, neuron)| neuron.controller == Some(*TEST_NEURON_1_OWNER_PRINCIPAL))
        .unwrap();
    NeuronId { id: *neuron_id }
}
/// Return the neuron ID of the neuron that the GTC method
/// `forward_whitelisted_unclaimed_accounts` should donate to: the neuron in
/// the Governance init payload controlled by test neuron 2's owner.
fn get_forward_whitelisted_unclaimed_accounts_recipient_neuron_id(
    payload_builder: &NnsInitPayloadsBuilder,
) -> NeuronId {
    let neurons = &payload_builder.governance.proto.neurons;
    let (neuron_id, _) = neurons
        .iter()
        .find(|(_, neuron)| neuron.controller == Some(*TEST_NEURON_2_OWNER_PRINCIPAL))
        .unwrap();
    NeuronId { id: *neuron_id }
}
| 36.46063 | 99 | 0.670518 |
f5a9f94cab19f8e4e6d9e91b3d45f52d90d3d97d | 935,356 | #![doc = "generated by AutoRust"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::models;
/// Service client: holds the connection settings and the HTTP pipeline
/// through which every operation-group request is sent.
#[derive(Clone)]
pub struct Client {
    /// Base URL of the service; defaults to `DEFAULT_ENDPOINT` when built
    /// via `ClientBuilder`.
    endpoint: String,
    /// Credential used to obtain bearer tokens for each request.
    credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
    /// OAuth scopes requested when fetching tokens.
    scopes: Vec<String>,
    /// HTTP pipeline all requests are dispatched through (see `send`).
    pipeline: azure_core::Pipeline,
}
/// Builder for [`Client`]; only the credential is mandatory, the endpoint
/// and scopes fall back to defaults in `build`.
#[derive(Clone)]
pub struct ClientBuilder {
    /// Credential the built `Client` will authenticate with (required).
    credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
    /// Optional endpoint override; `build` falls back to `DEFAULT_ENDPOINT`.
    endpoint: Option<String>,
    /// Optional scope override; `build` falls back to `["{endpoint}/"]`.
    scopes: Option<Vec<String>>,
}
/// Endpoint used when `ClientBuilder::endpoint` is not set: the Azure
/// public-cloud Resource Manager endpoint.
pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD;
impl ClientBuilder {
pub fn new(credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>) -> Self {
Self {
credential,
endpoint: None,
scopes: None,
}
}
pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self {
self.endpoint = Some(endpoint.into());
self
}
pub fn scopes(mut self, scopes: &[&str]) -> Self {
self.scopes = Some(scopes.iter().map(|scope| (*scope).to_owned()).collect());
self
}
pub fn build(self) -> Client {
let endpoint = self.endpoint.unwrap_or_else(|| DEFAULT_ENDPOINT.to_owned());
let scopes = self.scopes.unwrap_or_else(|| vec![format!("{}/", endpoint)]);
Client::new(endpoint, self.credential, scopes)
}
}
impl Client {
    /// Returns the configured service endpoint.
    pub(crate) fn endpoint(&self) -> &str {
        self.endpoint.as_str()
    }
    /// Returns the credential used to obtain bearer tokens.
    pub(crate) fn token_credential(&self) -> &dyn azure_core::auth::TokenCredential {
        self.credential.as_ref()
    }
    /// Returns the configured OAuth scopes as borrowed strings.
    pub(crate) fn scopes(&self) -> Vec<&str> {
        self.scopes.iter().map(String::as_str).collect()
    }
    /// Dispatches `request` through the client's HTTP pipeline.
    pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> Result<azure_core::Response, azure_core::Error> {
        let mut context = azure_core::Context::default();
        let mut request = request.into();
        self.pipeline.send(&mut context, &mut request).await
    }
    /// Constructs a client directly from its parts; most callers should
    /// prefer going through [`ClientBuilder`].
    pub fn new(
        endpoint: impl Into<String>,
        credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
        scopes: Vec<String>,
    ) -> Self {
        let endpoint = endpoint.into();
        // Pipeline is built with default client options and no extra
        // per-call/per-retry policies.
        let pipeline = azure_core::Pipeline::new(
            option_env!("CARGO_PKG_NAME"),
            option_env!("CARGO_PKG_VERSION"),
            azure_core::ClientOptions::default(),
            Vec::new(),
            Vec::new(),
        );
        Self {
            endpoint,
            credential,
            scopes,
            pipeline,
        }
    }
    // Operation-group accessors: each returns a lightweight wrapper that
    // clones this client and exposes that group's request builders.
    pub fn availability_sets(&self) -> availability_sets::Client {
        availability_sets::Client(self.clone())
    }
    pub fn cloud_service_operating_systems(&self) -> cloud_service_operating_systems::Client {
        cloud_service_operating_systems::Client(self.clone())
    }
    pub fn cloud_service_role_instances(&self) -> cloud_service_role_instances::Client {
        cloud_service_role_instances::Client(self.clone())
    }
    pub fn cloud_service_roles(&self) -> cloud_service_roles::Client {
        cloud_service_roles::Client(self.clone())
    }
    pub fn cloud_services(&self) -> cloud_services::Client {
        cloud_services::Client(self.clone())
    }
    pub fn cloud_services_update_domain(&self) -> cloud_services_update_domain::Client {
        cloud_services_update_domain::Client(self.clone())
    }
    pub fn dedicated_host_groups(&self) -> dedicated_host_groups::Client {
        dedicated_host_groups::Client(self.clone())
    }
    pub fn dedicated_hosts(&self) -> dedicated_hosts::Client {
        dedicated_hosts::Client(self.clone())
    }
    pub fn images(&self) -> images::Client {
        images::Client(self.clone())
    }
    pub fn log_analytics(&self) -> log_analytics::Client {
        log_analytics::Client(self.clone())
    }
    pub fn operations(&self) -> operations::Client {
        operations::Client(self.clone())
    }
    pub fn proximity_placement_groups(&self) -> proximity_placement_groups::Client {
        proximity_placement_groups::Client(self.clone())
    }
    pub fn restore_point_collections(&self) -> restore_point_collections::Client {
        restore_point_collections::Client(self.clone())
    }
    pub fn restore_points(&self) -> restore_points::Client {
        restore_points::Client(self.clone())
    }
    pub fn ssh_public_keys(&self) -> ssh_public_keys::Client {
        ssh_public_keys::Client(self.clone())
    }
    pub fn usage(&self) -> usage::Client {
        usage::Client(self.clone())
    }
    pub fn virtual_machine_extension_images(&self) -> virtual_machine_extension_images::Client {
        virtual_machine_extension_images::Client(self.clone())
    }
    pub fn virtual_machine_extensions(&self) -> virtual_machine_extensions::Client {
        virtual_machine_extensions::Client(self.clone())
    }
    pub fn virtual_machine_images(&self) -> virtual_machine_images::Client {
        virtual_machine_images::Client(self.clone())
    }
    pub fn virtual_machine_images_edge_zone(&self) -> virtual_machine_images_edge_zone::Client {
        virtual_machine_images_edge_zone::Client(self.clone())
    }
    pub fn virtual_machine_run_commands(&self) -> virtual_machine_run_commands::Client {
        virtual_machine_run_commands::Client(self.clone())
    }
    pub fn virtual_machine_scale_set_extensions(&self) -> virtual_machine_scale_set_extensions::Client {
        virtual_machine_scale_set_extensions::Client(self.clone())
    }
    pub fn virtual_machine_scale_set_rolling_upgrades(&self) -> virtual_machine_scale_set_rolling_upgrades::Client {
        virtual_machine_scale_set_rolling_upgrades::Client(self.clone())
    }
    pub fn virtual_machine_scale_set_v_ms(&self) -> virtual_machine_scale_set_v_ms::Client {
        virtual_machine_scale_set_v_ms::Client(self.clone())
    }
    pub fn virtual_machine_scale_set_vm_extensions(&self) -> virtual_machine_scale_set_vm_extensions::Client {
        virtual_machine_scale_set_vm_extensions::Client(self.clone())
    }
    pub fn virtual_machine_scale_set_vm_run_commands(&self) -> virtual_machine_scale_set_vm_run_commands::Client {
        virtual_machine_scale_set_vm_run_commands::Client(self.clone())
    }
    pub fn virtual_machine_scale_sets(&self) -> virtual_machine_scale_sets::Client {
        virtual_machine_scale_sets::Client(self.clone())
    }
    pub fn virtual_machine_sizes(&self) -> virtual_machine_sizes::Client {
        virtual_machine_sizes::Client(self.clone())
    }
    pub fn virtual_machines(&self) -> virtual_machines::Client {
        virtual_machines::Client(self.clone())
    }
}
/// Crate-wide error type: one variant per service operation, each
/// transparently wrapping (via `#[from]`/`#[error(transparent)]`) that
/// operation module's own `Error` type. `#[non_exhaustive]` allows new
/// operations to be added without a breaking change; the
/// `Group_Operation` variant names intentionally break camel-case
/// conventions, hence `#[allow(non_camel_case_types)]`.
#[non_exhaustive]
#[derive(Debug, thiserror :: Error)]
#[allow(non_camel_case_types)]
pub enum Error {
    #[error(transparent)]
    CloudServiceRoleInstances_Get(#[from] cloud_service_role_instances::get::Error),
    #[error(transparent)]
    CloudServiceRoleInstances_Delete(#[from] cloud_service_role_instances::delete::Error),
    #[error(transparent)]
    CloudServiceRoleInstances_GetInstanceView(#[from] cloud_service_role_instances::get_instance_view::Error),
    #[error(transparent)]
    CloudServiceRoleInstances_List(#[from] cloud_service_role_instances::list::Error),
    #[error(transparent)]
    CloudServiceRoleInstances_Restart(#[from] cloud_service_role_instances::restart::Error),
    #[error(transparent)]
    CloudServiceRoleInstances_Reimage(#[from] cloud_service_role_instances::reimage::Error),
    #[error(transparent)]
    CloudServiceRoleInstances_Rebuild(#[from] cloud_service_role_instances::rebuild::Error),
    #[error(transparent)]
    CloudServiceRoleInstances_GetRemoteDesktopFile(#[from] cloud_service_role_instances::get_remote_desktop_file::Error),
    #[error(transparent)]
    CloudServiceRoles_Get(#[from] cloud_service_roles::get::Error),
    #[error(transparent)]
    CloudServiceRoles_List(#[from] cloud_service_roles::list::Error),
    #[error(transparent)]
    CloudServices_Get(#[from] cloud_services::get::Error),
    #[error(transparent)]
    CloudServices_CreateOrUpdate(#[from] cloud_services::create_or_update::Error),
    #[error(transparent)]
    CloudServices_Update(#[from] cloud_services::update::Error),
    #[error(transparent)]
    CloudServices_Delete(#[from] cloud_services::delete::Error),
    #[error(transparent)]
    CloudServices_GetInstanceView(#[from] cloud_services::get_instance_view::Error),
    #[error(transparent)]
    CloudServices_ListAll(#[from] cloud_services::list_all::Error),
    #[error(transparent)]
    CloudServices_List(#[from] cloud_services::list::Error),
    #[error(transparent)]
    CloudServices_Start(#[from] cloud_services::start::Error),
    #[error(transparent)]
    CloudServices_PowerOff(#[from] cloud_services::power_off::Error),
    #[error(transparent)]
    CloudServices_Restart(#[from] cloud_services::restart::Error),
    #[error(transparent)]
    CloudServices_Reimage(#[from] cloud_services::reimage::Error),
    #[error(transparent)]
    CloudServices_Rebuild(#[from] cloud_services::rebuild::Error),
    #[error(transparent)]
    CloudServices_DeleteInstances(#[from] cloud_services::delete_instances::Error),
    #[error(transparent)]
    CloudServicesUpdateDomain_GetUpdateDomain(#[from] cloud_services_update_domain::get_update_domain::Error),
    #[error(transparent)]
    CloudServicesUpdateDomain_WalkUpdateDomain(#[from] cloud_services_update_domain::walk_update_domain::Error),
    #[error(transparent)]
    CloudServicesUpdateDomain_ListUpdateDomains(#[from] cloud_services_update_domain::list_update_domains::Error),
    #[error(transparent)]
    CloudServiceOperatingSystems_GetOsVersion(#[from] cloud_service_operating_systems::get_os_version::Error),
    #[error(transparent)]
    CloudServiceOperatingSystems_ListOsVersions(#[from] cloud_service_operating_systems::list_os_versions::Error),
    #[error(transparent)]
    CloudServiceOperatingSystems_GetOsFamily(#[from] cloud_service_operating_systems::get_os_family::Error),
    #[error(transparent)]
    CloudServiceOperatingSystems_ListOsFamilies(#[from] cloud_service_operating_systems::list_os_families::Error),
    #[error(transparent)]
    Operations_List(#[from] operations::list::Error),
    #[error(transparent)]
    AvailabilitySets_Get(#[from] availability_sets::get::Error),
    #[error(transparent)]
    AvailabilitySets_CreateOrUpdate(#[from] availability_sets::create_or_update::Error),
    #[error(transparent)]
    AvailabilitySets_Update(#[from] availability_sets::update::Error),
    #[error(transparent)]
    AvailabilitySets_Delete(#[from] availability_sets::delete::Error),
    #[error(transparent)]
    AvailabilitySets_ListBySubscription(#[from] availability_sets::list_by_subscription::Error),
    #[error(transparent)]
    AvailabilitySets_List(#[from] availability_sets::list::Error),
    #[error(transparent)]
    AvailabilitySets_ListAvailableSizes(#[from] availability_sets::list_available_sizes::Error),
    #[error(transparent)]
    ProximityPlacementGroups_Get(#[from] proximity_placement_groups::get::Error),
    #[error(transparent)]
    ProximityPlacementGroups_CreateOrUpdate(#[from] proximity_placement_groups::create_or_update::Error),
    #[error(transparent)]
    ProximityPlacementGroups_Update(#[from] proximity_placement_groups::update::Error),
    #[error(transparent)]
    ProximityPlacementGroups_Delete(#[from] proximity_placement_groups::delete::Error),
    #[error(transparent)]
    ProximityPlacementGroups_ListBySubscription(#[from] proximity_placement_groups::list_by_subscription::Error),
    #[error(transparent)]
    ProximityPlacementGroups_ListByResourceGroup(#[from] proximity_placement_groups::list_by_resource_group::Error),
    #[error(transparent)]
    DedicatedHostGroups_Get(#[from] dedicated_host_groups::get::Error),
    #[error(transparent)]
    DedicatedHostGroups_CreateOrUpdate(#[from] dedicated_host_groups::create_or_update::Error),
    #[error(transparent)]
    DedicatedHostGroups_Update(#[from] dedicated_host_groups::update::Error),
    #[error(transparent)]
    DedicatedHostGroups_Delete(#[from] dedicated_host_groups::delete::Error),
    #[error(transparent)]
    DedicatedHostGroups_ListByResourceGroup(#[from] dedicated_host_groups::list_by_resource_group::Error),
    #[error(transparent)]
    DedicatedHostGroups_ListBySubscription(#[from] dedicated_host_groups::list_by_subscription::Error),
    #[error(transparent)]
    DedicatedHosts_Get(#[from] dedicated_hosts::get::Error),
    #[error(transparent)]
    DedicatedHosts_CreateOrUpdate(#[from] dedicated_hosts::create_or_update::Error),
    #[error(transparent)]
    DedicatedHosts_Update(#[from] dedicated_hosts::update::Error),
    #[error(transparent)]
    DedicatedHosts_Delete(#[from] dedicated_hosts::delete::Error),
    #[error(transparent)]
    DedicatedHosts_ListByHostGroup(#[from] dedicated_hosts::list_by_host_group::Error),
    #[error(transparent)]
    SshPublicKeys_ListBySubscription(#[from] ssh_public_keys::list_by_subscription::Error),
    #[error(transparent)]
    SshPublicKeys_ListByResourceGroup(#[from] ssh_public_keys::list_by_resource_group::Error),
    #[error(transparent)]
    SshPublicKeys_Get(#[from] ssh_public_keys::get::Error),
    #[error(transparent)]
    SshPublicKeys_Create(#[from] ssh_public_keys::create::Error),
    #[error(transparent)]
    SshPublicKeys_Update(#[from] ssh_public_keys::update::Error),
    #[error(transparent)]
    SshPublicKeys_Delete(#[from] ssh_public_keys::delete::Error),
    #[error(transparent)]
    SshPublicKeys_GenerateKeyPair(#[from] ssh_public_keys::generate_key_pair::Error),
    #[error(transparent)]
    VirtualMachineExtensionImages_Get(#[from] virtual_machine_extension_images::get::Error),
    #[error(transparent)]
    VirtualMachineExtensionImages_ListTypes(#[from] virtual_machine_extension_images::list_types::Error),
    #[error(transparent)]
    VirtualMachineExtensionImages_ListVersions(#[from] virtual_machine_extension_images::list_versions::Error),
    #[error(transparent)]
    VirtualMachineExtensions_Get(#[from] virtual_machine_extensions::get::Error),
    #[error(transparent)]
    VirtualMachineExtensions_CreateOrUpdate(#[from] virtual_machine_extensions::create_or_update::Error),
    #[error(transparent)]
    VirtualMachineExtensions_Update(#[from] virtual_machine_extensions::update::Error),
    #[error(transparent)]
    VirtualMachineExtensions_Delete(#[from] virtual_machine_extensions::delete::Error),
    #[error(transparent)]
    VirtualMachineExtensions_List(#[from] virtual_machine_extensions::list::Error),
    #[error(transparent)]
    VirtualMachineImages_Get(#[from] virtual_machine_images::get::Error),
    #[error(transparent)]
    VirtualMachineImages_List(#[from] virtual_machine_images::list::Error),
    #[error(transparent)]
    VirtualMachineImages_ListOffers(#[from] virtual_machine_images::list_offers::Error),
    #[error(transparent)]
    VirtualMachineImages_ListPublishers(#[from] virtual_machine_images::list_publishers::Error),
    #[error(transparent)]
    VirtualMachineImages_ListSkus(#[from] virtual_machine_images::list_skus::Error),
    #[error(transparent)]
    VirtualMachineImagesEdgeZone_Get(#[from] virtual_machine_images_edge_zone::get::Error),
    #[error(transparent)]
    VirtualMachineImagesEdgeZone_List(#[from] virtual_machine_images_edge_zone::list::Error),
    #[error(transparent)]
    VirtualMachineImagesEdgeZone_ListOffers(#[from] virtual_machine_images_edge_zone::list_offers::Error),
    #[error(transparent)]
    VirtualMachineImagesEdgeZone_ListPublishers(#[from] virtual_machine_images_edge_zone::list_publishers::Error),
    #[error(transparent)]
    VirtualMachineImagesEdgeZone_ListSkus(#[from] virtual_machine_images_edge_zone::list_skus::Error),
    #[error(transparent)]
    Usage_List(#[from] usage::list::Error),
    #[error(transparent)]
    VirtualMachines_ListByLocation(#[from] virtual_machines::list_by_location::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_ListByLocation(#[from] virtual_machine_scale_sets::list_by_location::Error),
    #[error(transparent)]
    VirtualMachineSizes_List(#[from] virtual_machine_sizes::list::Error),
    #[error(transparent)]
    Images_Get(#[from] images::get::Error),
    #[error(transparent)]
    Images_CreateOrUpdate(#[from] images::create_or_update::Error),
    #[error(transparent)]
    Images_Update(#[from] images::update::Error),
    #[error(transparent)]
    Images_Delete(#[from] images::delete::Error),
    #[error(transparent)]
    Images_ListByResourceGroup(#[from] images::list_by_resource_group::Error),
    #[error(transparent)]
    Images_List(#[from] images::list::Error),
    #[error(transparent)]
    VirtualMachines_Capture(#[from] virtual_machines::capture::Error),
    #[error(transparent)]
    VirtualMachines_Get(#[from] virtual_machines::get::Error),
    #[error(transparent)]
    VirtualMachines_CreateOrUpdate(#[from] virtual_machines::create_or_update::Error),
    #[error(transparent)]
    VirtualMachines_Update(#[from] virtual_machines::update::Error),
    #[error(transparent)]
    VirtualMachines_Delete(#[from] virtual_machines::delete::Error),
    #[error(transparent)]
    VirtualMachines_InstanceView(#[from] virtual_machines::instance_view::Error),
    #[error(transparent)]
    VirtualMachines_ConvertToManagedDisks(#[from] virtual_machines::convert_to_managed_disks::Error),
    #[error(transparent)]
    VirtualMachines_Deallocate(#[from] virtual_machines::deallocate::Error),
    #[error(transparent)]
    VirtualMachines_Generalize(#[from] virtual_machines::generalize::Error),
    #[error(transparent)]
    VirtualMachines_List(#[from] virtual_machines::list::Error),
    #[error(transparent)]
    VirtualMachines_ListAll(#[from] virtual_machines::list_all::Error),
    #[error(transparent)]
    RestorePointCollections_Get(#[from] restore_point_collections::get::Error),
    #[error(transparent)]
    RestorePointCollections_CreateOrUpdate(#[from] restore_point_collections::create_or_update::Error),
    #[error(transparent)]
    RestorePointCollections_Update(#[from] restore_point_collections::update::Error),
    #[error(transparent)]
    RestorePointCollections_Delete(#[from] restore_point_collections::delete::Error),
    #[error(transparent)]
    RestorePointCollections_List(#[from] restore_point_collections::list::Error),
    #[error(transparent)]
    RestorePointCollections_ListAll(#[from] restore_point_collections::list_all::Error),
    #[error(transparent)]
    RestorePoints_Get(#[from] restore_points::get::Error),
    #[error(transparent)]
    RestorePoints_Create(#[from] restore_points::create::Error),
    #[error(transparent)]
    RestorePoints_Delete(#[from] restore_points::delete::Error),
    #[error(transparent)]
    VirtualMachines_ListAvailableSizes(#[from] virtual_machines::list_available_sizes::Error),
    #[error(transparent)]
    VirtualMachines_PowerOff(#[from] virtual_machines::power_off::Error),
    #[error(transparent)]
    VirtualMachines_Reapply(#[from] virtual_machines::reapply::Error),
    #[error(transparent)]
    VirtualMachines_Restart(#[from] virtual_machines::restart::Error),
    #[error(transparent)]
    VirtualMachines_Start(#[from] virtual_machines::start::Error),
    #[error(transparent)]
    VirtualMachines_Redeploy(#[from] virtual_machines::redeploy::Error),
    #[error(transparent)]
    VirtualMachines_Reimage(#[from] virtual_machines::reimage::Error),
    #[error(transparent)]
    VirtualMachines_RetrieveBootDiagnosticsData(#[from] virtual_machines::retrieve_boot_diagnostics_data::Error),
    #[error(transparent)]
    VirtualMachines_PerformMaintenance(#[from] virtual_machines::perform_maintenance::Error),
    #[error(transparent)]
    VirtualMachines_SimulateEviction(#[from] virtual_machines::simulate_eviction::Error),
    #[error(transparent)]
    VirtualMachines_AssessPatches(#[from] virtual_machines::assess_patches::Error),
    #[error(transparent)]
    VirtualMachines_InstallPatches(#[from] virtual_machines::install_patches::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_Get(#[from] virtual_machine_scale_sets::get::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_CreateOrUpdate(#[from] virtual_machine_scale_sets::create_or_update::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_Update(#[from] virtual_machine_scale_sets::update::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_Delete(#[from] virtual_machine_scale_sets::delete::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_Deallocate(#[from] virtual_machine_scale_sets::deallocate::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_DeleteInstances(#[from] virtual_machine_scale_sets::delete_instances::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_GetInstanceView(#[from] virtual_machine_scale_sets::get_instance_view::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_List(#[from] virtual_machine_scale_sets::list::Error),
    #[error(transparent)]
    VirtualMachineScaleSetExtensions_Get(#[from] virtual_machine_scale_set_extensions::get::Error),
    #[error(transparent)]
    VirtualMachineScaleSetExtensions_CreateOrUpdate(#[from] virtual_machine_scale_set_extensions::create_or_update::Error),
    #[error(transparent)]
    VirtualMachineScaleSetExtensions_Update(#[from] virtual_machine_scale_set_extensions::update::Error),
    #[error(transparent)]
    VirtualMachineScaleSetExtensions_Delete(#[from] virtual_machine_scale_set_extensions::delete::Error),
    #[error(transparent)]
    VirtualMachineScaleSetExtensions_List(#[from] virtual_machine_scale_set_extensions::list::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_ListAll(#[from] virtual_machine_scale_sets::list_all::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_ListSkus(#[from] virtual_machine_scale_sets::list_skus::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_GetOsUpgradeHistory(#[from] virtual_machine_scale_sets::get_os_upgrade_history::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_PowerOff(#[from] virtual_machine_scale_sets::power_off::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_Restart(#[from] virtual_machine_scale_sets::restart::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_Start(#[from] virtual_machine_scale_sets::start::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_Redeploy(#[from] virtual_machine_scale_sets::redeploy::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_PerformMaintenance(#[from] virtual_machine_scale_sets::perform_maintenance::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_UpdateInstances(#[from] virtual_machine_scale_sets::update_instances::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_Reimage(#[from] virtual_machine_scale_sets::reimage::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_ReimageAll(#[from] virtual_machine_scale_sets::reimage_all::Error),
    #[error(transparent)]
    VirtualMachineScaleSetRollingUpgrades_Cancel(#[from] virtual_machine_scale_set_rolling_upgrades::cancel::Error),
    #[error(transparent)]
    VirtualMachineScaleSetRollingUpgrades_StartOsUpgrade(#[from] virtual_machine_scale_set_rolling_upgrades::start_os_upgrade::Error),
    #[error(transparent)]
    VirtualMachineScaleSetRollingUpgrades_StartExtensionUpgrade(
        #[from] virtual_machine_scale_set_rolling_upgrades::start_extension_upgrade::Error,
    ),
    #[error(transparent)]
    VirtualMachineScaleSetRollingUpgrades_GetLatest(#[from] virtual_machine_scale_set_rolling_upgrades::get_latest::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_ForceRecoveryServiceFabricPlatformUpdateDomainWalk(
        #[from] virtual_machine_scale_sets::force_recovery_service_fabric_platform_update_domain_walk::Error,
    ),
    #[error(transparent)]
    VirtualMachineScaleSets_ConvertToSinglePlacementGroup(#[from] virtual_machine_scale_sets::convert_to_single_placement_group::Error),
    #[error(transparent)]
    VirtualMachineScaleSets_SetOrchestrationServiceState(#[from] virtual_machine_scale_sets::set_orchestration_service_state::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVmExtensions_Get(#[from] virtual_machine_scale_set_vm_extensions::get::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVmExtensions_CreateOrUpdate(#[from] virtual_machine_scale_set_vm_extensions::create_or_update::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVmExtensions_Update(#[from] virtual_machine_scale_set_vm_extensions::update::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVmExtensions_Delete(#[from] virtual_machine_scale_set_vm_extensions::delete::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVmExtensions_List(#[from] virtual_machine_scale_set_vm_extensions::list::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVMs_Reimage(#[from] virtual_machine_scale_set_v_ms::reimage::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVMs_ReimageAll(#[from] virtual_machine_scale_set_v_ms::reimage_all::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVMs_Deallocate(#[from] virtual_machine_scale_set_v_ms::deallocate::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVMs_Get(#[from] virtual_machine_scale_set_v_ms::get::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVMs_Update(#[from] virtual_machine_scale_set_v_ms::update::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVMs_Delete(#[from] virtual_machine_scale_set_v_ms::delete::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVMs_GetInstanceView(#[from] virtual_machine_scale_set_v_ms::get_instance_view::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVMs_List(#[from] virtual_machine_scale_set_v_ms::list::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVMs_PowerOff(#[from] virtual_machine_scale_set_v_ms::power_off::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVMs_Restart(#[from] virtual_machine_scale_set_v_ms::restart::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVMs_Start(#[from] virtual_machine_scale_set_v_ms::start::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVMs_Redeploy(#[from] virtual_machine_scale_set_v_ms::redeploy::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVMs_RetrieveBootDiagnosticsData(#[from] virtual_machine_scale_set_v_ms::retrieve_boot_diagnostics_data::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVMs_PerformMaintenance(#[from] virtual_machine_scale_set_v_ms::perform_maintenance::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVMs_SimulateEviction(#[from] virtual_machine_scale_set_v_ms::simulate_eviction::Error),
    #[error(transparent)]
    LogAnalytics_ExportRequestRateByInterval(#[from] log_analytics::export_request_rate_by_interval::Error),
    #[error(transparent)]
    LogAnalytics_ExportThrottledRequests(#[from] log_analytics::export_throttled_requests::Error),
    #[error(transparent)]
    VirtualMachineRunCommands_List(#[from] virtual_machine_run_commands::list::Error),
    #[error(transparent)]
    VirtualMachineRunCommands_Get(#[from] virtual_machine_run_commands::get::Error),
    #[error(transparent)]
    VirtualMachines_RunCommand(#[from] virtual_machines::run_command::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVMs_RunCommand(#[from] virtual_machine_scale_set_v_ms::run_command::Error),
    #[error(transparent)]
    VirtualMachineRunCommands_GetByVirtualMachine(#[from] virtual_machine_run_commands::get_by_virtual_machine::Error),
    #[error(transparent)]
    VirtualMachineRunCommands_CreateOrUpdate(#[from] virtual_machine_run_commands::create_or_update::Error),
    #[error(transparent)]
    VirtualMachineRunCommands_Update(#[from] virtual_machine_run_commands::update::Error),
    #[error(transparent)]
    VirtualMachineRunCommands_Delete(#[from] virtual_machine_run_commands::delete::Error),
    #[error(transparent)]
    VirtualMachineRunCommands_ListByVirtualMachine(#[from] virtual_machine_run_commands::list_by_virtual_machine::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVmRunCommands_Get(#[from] virtual_machine_scale_set_vm_run_commands::get::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVmRunCommands_CreateOrUpdate(#[from] virtual_machine_scale_set_vm_run_commands::create_or_update::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVmRunCommands_Update(#[from] virtual_machine_scale_set_vm_run_commands::update::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVmRunCommands_Delete(#[from] virtual_machine_scale_set_vm_run_commands::delete::Error),
    #[error(transparent)]
    VirtualMachineScaleSetVmRunCommands_List(#[from] virtual_machine_scale_set_vm_run_commands::list::Error),
}
pub mod cloud_service_role_instances {
use super::models;
pub struct Client(pub(crate) super::Client);
    impl Client {
        /// Starts building a request to retrieve a single role instance of a
        /// cloud service; use `expand` on the builder to request extra detail.
        pub fn get(
            &self,
            role_instance_name: impl Into<String>,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> get::Builder {
            get::Builder {
                client: self.0.clone(),
                role_instance_name: role_instance_name.into(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
                expand: None,
            }
        }
        /// Starts building a request to delete a role instance from a cloud service.
        pub fn delete(
            &self,
            role_instance_name: impl Into<String>,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> delete::Builder {
            delete::Builder {
                client: self.0.clone(),
                role_instance_name: role_instance_name.into(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Starts building a request for the instance view of a role instance.
        pub fn get_instance_view(
            &self,
            role_instance_name: impl Into<String>,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> get_instance_view::Builder {
            get_instance_view::Builder {
                client: self.0.clone(),
                role_instance_name: role_instance_name.into(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Starts building a request to list the role instances of a cloud
        /// service; use `expand` on the builder to request extra detail.
        pub fn list(
            &self,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> list::Builder {
            list::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
                expand: None,
            }
        }
        /// Starts building a request to restart a role instance.
        pub fn restart(
            &self,
            role_instance_name: impl Into<String>,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> restart::Builder {
            restart::Builder {
                client: self.0.clone(),
                role_instance_name: role_instance_name.into(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Starts building a request to reimage a role instance.
        pub fn reimage(
            &self,
            role_instance_name: impl Into<String>,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> reimage::Builder {
            reimage::Builder {
                client: self.0.clone(),
                role_instance_name: role_instance_name.into(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Starts building a request to rebuild a role instance.
        pub fn rebuild(
            &self,
            role_instance_name: impl Into<String>,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> rebuild::Builder {
            rebuild::Builder {
                client: self.0.clone(),
                role_instance_name: role_instance_name.into(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Starts building a request for the remote desktop file of a role instance.
        pub fn get_remote_desktop_file(
            &self,
            role_instance_name: impl Into<String>,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> get_remote_desktop_file::Builder {
            get_remote_desktop_file::Builder {
                client: self.0.clone(),
                role_instance_name: role_instance_name.into(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
    }
    pub mod get {
        use super::models;
        /// Everything that can go wrong while building, sending, or decoding
        /// the `get` request; `DefaultResponse` carries the service-reported
        /// `CloudError` for any non-200 status.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::StreamError),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Captured parameters of the `get` call; finalize with `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) role_instance_name: String,
            pub(crate) resource_group_name: String,
            pub(crate) cloud_service_name: String,
            pub(crate) subscription_id: String,
            // Optional `$expand` query parameter; omitted from the URL when None.
            pub(crate) expand: Option<String>,
        }
        impl Builder {
            /// Sets the optional `$expand` query parameter.
            pub fn expand(mut self, expand: impl Into<String>) -> Self {
                self.expand = Some(expand.into());
                self
            }
            /// Builds and sends the GET request, returning the deserialized
            /// `RoleInstance` on 200 or `Error::DefaultResponse` otherwise.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::RoleInstance, Error>> {
                Box::pin(async move {
                    // Assemble the resource URL from the captured parameters.
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/roleInstances/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.cloud_service_name,
                        &self.role_instance_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::GET);
                    // Acquire a bearer token for the client's scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                    // `$expand` is only appended when explicitly set.
                    if let Some(expand) = &self.expand {
                        url.query_pairs_mut().append_pair("$expand", expand);
                    }
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::RoleInstance =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        // Any other status is decoded as a service CloudError.
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudError =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Err(Error::DefaultResponse {
                                status_code,
                                value: rsp_value,
                            })
                        }
                    }
                })
            }
        }
    }
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) role_instance_name: String,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/roleInstances/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name,
&self.role_instance_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get_instance_view {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) role_instance_name: String,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::RoleInstanceInstanceView, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/roleInstances/{}/instanceView",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name,
&self.role_instance_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RoleInstanceInstanceView =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) subscription_id: String,
pub(crate) expand: Option<String>,
}
impl Builder {
pub fn expand(mut self, expand: impl Into<String>) -> Self {
self.expand = Some(expand.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::RoleInstanceListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/roleInstances",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(expand) = &self.expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RoleInstanceListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod restart {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) role_instance_name: String,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/roleInstances/{}/restart",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name,
&self.role_instance_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod reimage {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) role_instance_name: String,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/roleInstances/{}/reimage",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name,
&self.role_instance_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod rebuild {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) role_instance_name: String,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/roleInstances/{}/rebuild",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name,
&self.role_instance_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get_remote_desktop_file {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) role_instance_name: String,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<bytes::Bytes, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/roleInstances/{}/remoteDesktopFile" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . cloud_service_name , & self . role_instance_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value = rsp_body;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
/// Operations on cloud service roles (`get` a single role, `list` all roles).
pub mod cloud_service_roles {
    use super::models;
    /// Thin wrapper over the shared client; each method returns a request builder.
    pub struct Client(pub(crate) super::Client);
    impl Client {
        /// Returns a builder for fetching a single named role of a cloud service.
        pub fn get(
            &self,
            role_name: impl Into<String>,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> get::Builder {
            get::Builder {
                client: self.0.clone(),
                role_name: role_name.into(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Returns a builder for listing all roles of a cloud service.
        pub fn list(
            &self,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> list::Builder {
            list::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
    }
    /// Request builder for fetching a single cloud service role.
    pub mod get {
        use super::models;
        /// Errors produced while building, sending, or decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::StreamError),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Parameters identifying the role to fetch.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) role_name: String,
            pub(crate) resource_group_name: String,
            pub(crate) cloud_service_name: String,
            pub(crate) subscription_id: String,
        }
        impl Builder {
            /// Builds and sends the GET request, returning the deserialized
            /// `CloudServiceRole` on HTTP 200.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::CloudServiceRole, Error>> {
                Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/roles/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.cloud_service_name,
                        &self.role_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::GET);
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    // Headers are not consumed by this operation; the leading
                    // underscore silences the unused-variable warning.
                    let (rsp_status, _rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudServiceRole =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudError =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Err(Error::DefaultResponse {
                                status_code,
                                value: rsp_value,
                            })
                        }
                    }
                })
            }
        }
    }
    /// Request builder for listing all roles of a cloud service.
    pub mod list {
        use super::models;
        /// Errors produced while building, sending, or decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::StreamError),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Parameters identifying the cloud service whose roles are listed.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_group_name: String,
            pub(crate) cloud_service_name: String,
            pub(crate) subscription_id: String,
        }
        impl Builder {
            /// Builds and sends the GET request, returning one page of results
            /// as a `CloudServiceRoleListResult` on HTTP 200.
            pub fn into_future(
                self,
            ) -> futures::future::BoxFuture<'static, std::result::Result<models::CloudServiceRoleListResult, Error>> {
                Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/roles",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.cloud_service_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::GET);
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    // Headers are not consumed by this operation; the leading
                    // underscore silences the unused-variable warning.
                    let (rsp_status, _rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudServiceRoleListResult =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudError =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Err(Error::DefaultResponse {
                                status_code,
                                value: rsp_value,
                            })
                        }
                    }
                })
            }
        }
    }
}
pub mod cloud_services {
use super::models;
    /// Thin wrapper over the shared client; each method returns a request builder
    /// for one cloud-services operation.
    pub struct Client(pub(crate) super::Client);
    impl Client {
        /// Returns a builder for the `get` operation on a cloud service.
        pub fn get(
            &self,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> get::Builder {
            get::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Returns a builder for the `create_or_update` operation; the request
        /// body (`parameters`) starts unset and is configured on the builder.
        pub fn create_or_update(
            &self,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> create_or_update::Builder {
            create_or_update::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
                parameters: None,
            }
        }
        /// Returns a builder for the `update` operation; the request body
        /// (`parameters`) starts unset and is configured on the builder.
        pub fn update(
            &self,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> update::Builder {
            update::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
                parameters: None,
            }
        }
        /// Returns a builder for the `delete` operation on a cloud service.
        pub fn delete(
            &self,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> delete::Builder {
            delete::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Returns a builder for fetching a cloud service's instance view.
        pub fn get_instance_view(
            &self,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> get_instance_view::Builder {
            get_instance_view::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Returns a builder for listing cloud services across the whole
        /// subscription (no resource-group filter).
        pub fn list_all(&self, subscription_id: impl Into<String>) -> list_all::Builder {
            list_all::Builder {
                client: self.0.clone(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Returns a builder for listing cloud services within one resource group.
        pub fn list(&self, resource_group_name: impl Into<String>, subscription_id: impl Into<String>) -> list::Builder {
            list::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Returns a builder for the `start` operation on a cloud service.
        pub fn start(
            &self,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> start::Builder {
            start::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Returns a builder for the `power_off` operation on a cloud service.
        pub fn power_off(
            &self,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> power_off::Builder {
            power_off::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Returns a builder for the `restart` operation; the optional request
        /// body (`parameters`) starts unset.
        pub fn restart(
            &self,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> restart::Builder {
            restart::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
                parameters: None,
            }
        }
        /// Returns a builder for the `reimage` operation; the optional request
        /// body (`parameters`) starts unset.
        pub fn reimage(
            &self,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> reimage::Builder {
            reimage::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
                parameters: None,
            }
        }
        /// Returns a builder for the `rebuild` operation; the optional request
        /// body (`parameters`) starts unset.
        pub fn rebuild(
            &self,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> rebuild::Builder {
            rebuild::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
                parameters: None,
            }
        }
        /// Returns a builder for the `delete_instances` operation; the optional
        /// request body (`parameters`) starts unset.
        pub fn delete_instances(
            &self,
            resource_group_name: impl Into<String>,
            cloud_service_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> delete_instances::Builder {
            delete_instances::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                cloud_service_name: cloud_service_name.into(),
                subscription_id: subscription_id.into(),
                parameters: None,
            }
        }
    }
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::CloudService, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudService =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::CloudService),
Created201(models::CloudService),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) subscription_id: String,
pub(crate) parameters: Option<models::CloudService>,
}
impl Builder {
pub fn parameters(mut self, parameters: impl Into<models::CloudService>) -> Self {
self.parameters = Some(parameters.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = if let Some(parameters) = &self.parameters {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(parameters).map_err(Error::Serialize)?
} else {
azure_core::EMPTY_BODY
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudService =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudService =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod update {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) subscription_id: String,
pub(crate) parameters: Option<models::CloudServiceUpdate>,
}
impl Builder {
pub fn parameters(mut self, parameters: impl Into<models::CloudServiceUpdate>) -> Self {
self.parameters = Some(parameters.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::CloudService, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = if let Some(parameters) = &self.parameters {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(parameters).map_err(Error::Serialize)?
} else {
azure_core::EMPTY_BODY
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudService =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get_instance_view {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::CloudServiceInstanceView, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/instanceView",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudServiceInstanceView =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_all {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::CloudServiceListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/cloudServices",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudServiceListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::CloudServiceListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudServiceListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod start {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/start",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod power_off {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/poweroff",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod restart {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) subscription_id: String,
pub(crate) parameters: Option<models::RoleInstances>,
}
impl Builder {
pub fn parameters(mut self, parameters: impl Into<models::RoleInstances>) -> Self {
self.parameters = Some(parameters.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/restart",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = if let Some(parameters) = &self.parameters {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(parameters).map_err(Error::Serialize)?
} else {
azure_core::EMPTY_BODY
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod reimage {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) subscription_id: String,
pub(crate) parameters: Option<models::RoleInstances>,
}
impl Builder {
pub fn parameters(mut self, parameters: impl Into<models::RoleInstances>) -> Self {
self.parameters = Some(parameters.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/reimage",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = if let Some(parameters) = &self.parameters {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(parameters).map_err(Error::Serialize)?
} else {
azure_core::EMPTY_BODY
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod rebuild {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) subscription_id: String,
pub(crate) parameters: Option<models::RoleInstances>,
}
impl Builder {
pub fn parameters(mut self, parameters: impl Into<models::RoleInstances>) -> Self {
self.parameters = Some(parameters.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/rebuild",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = if let Some(parameters) = &self.parameters {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(parameters).map_err(Error::Serialize)?
} else {
azure_core::EMPTY_BODY
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete_instances {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) subscription_id: String,
pub(crate) parameters: Option<models::RoleInstances>,
}
impl Builder {
pub fn parameters(mut self, parameters: impl Into<models::RoleInstances>) -> Self {
self.parameters = Some(parameters.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/delete",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = if let Some(parameters) = &self.parameters {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(parameters).map_err(Error::Serialize)?
} else {
azure_core::EMPTY_BODY
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod cloud_services_update_domain {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get_update_domain(
&self,
resource_group_name: impl Into<String>,
cloud_service_name: impl Into<String>,
update_domain: i32,
subscription_id: impl Into<String>,
) -> get_update_domain::Builder {
get_update_domain::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
cloud_service_name: cloud_service_name.into(),
update_domain,
subscription_id: subscription_id.into(),
}
}
pub fn walk_update_domain(
&self,
resource_group_name: impl Into<String>,
cloud_service_name: impl Into<String>,
update_domain: i32,
subscription_id: impl Into<String>,
) -> walk_update_domain::Builder {
walk_update_domain::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
cloud_service_name: cloud_service_name.into(),
update_domain,
subscription_id: subscription_id.into(),
parameters: None,
}
}
pub fn list_update_domains(
&self,
resource_group_name: impl Into<String>,
cloud_service_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> list_update_domains::Builder {
list_update_domains::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
cloud_service_name: cloud_service_name.into(),
subscription_id: subscription_id.into(),
}
}
}
pub mod get_update_domain {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) update_domain: i32,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::UpdateDomain, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/updateDomains/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name,
&self.update_domain
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::UpdateDomain =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod walk_update_domain {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) update_domain: i32,
pub(crate) subscription_id: String,
pub(crate) parameters: Option<models::UpdateDomain>,
}
impl Builder {
pub fn parameters(mut self, parameters: impl Into<models::UpdateDomain>) -> Self {
self.parameters = Some(parameters.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/updateDomains/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name,
&self.update_domain
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = if let Some(parameters) = &self.parameters {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(parameters).map_err(Error::Serialize)?
} else {
azure_core::EMPTY_BODY
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_update_domains {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) cloud_service_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::UpdateDomainListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/cloudServices/{}/updateDomains",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cloud_service_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::UpdateDomainListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod cloud_service_operating_systems {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get_os_version(
&self,
location: impl Into<String>,
os_version_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> get_os_version::Builder {
get_os_version::Builder {
client: self.0.clone(),
location: location.into(),
os_version_name: os_version_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn list_os_versions(&self, location: impl Into<String>, subscription_id: impl Into<String>) -> list_os_versions::Builder {
list_os_versions::Builder {
client: self.0.clone(),
location: location.into(),
subscription_id: subscription_id.into(),
}
}
pub fn get_os_family(
&self,
location: impl Into<String>,
os_family_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> get_os_family::Builder {
get_os_family::Builder {
client: self.0.clone(),
location: location.into(),
os_family_name: os_family_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn list_os_families(&self, location: impl Into<String>, subscription_id: impl Into<String>) -> list_os_families::Builder {
list_os_families::Builder {
client: self.0.clone(),
location: location.into(),
subscription_id: subscription_id.into(),
}
}
}
pub mod get_os_version {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) os_version_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OsVersion, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/cloudServiceOsVersions/{}",
self.client.endpoint(),
&self.subscription_id,
&self.location,
&self.os_version_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OsVersion =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_os_versions {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OsVersionListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/cloudServiceOsVersions",
self.client.endpoint(),
&self.subscription_id,
&self.location
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OsVersionListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get_os_family {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) os_family_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OsFamily, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/cloudServiceOsFamilies/{}",
self.client.endpoint(),
&self.subscription_id,
&self.location,
&self.os_family_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OsFamily =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_os_families {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OsFamilyListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/cloudServiceOsFamilies",
self.client.endpoint(),
&self.subscription_id,
&self.location
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OsFamilyListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod operations {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(&self) -> list::Builder {
list::Builder { client: self.0.clone() }
}
}
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::ComputeOperationListResult, Error>> {
Box::pin(async move {
let url_str = &format!("{}/providers/Microsoft.Compute/operations", self.client.endpoint(),);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ComputeOperationListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod availability_sets {
use super::models;
    /// Client for the availability-set operation group; wraps the service client.
    pub struct Client(pub(crate) super::Client);
    impl Client {
        /// Retrieves information about an availability set.
        pub fn get(
            &self,
            resource_group_name: impl Into<String>,
            availability_set_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> get::Builder {
            get::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                availability_set_name: availability_set_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Creates or updates an availability set.
        pub fn create_or_update(
            &self,
            resource_group_name: impl Into<String>,
            availability_set_name: impl Into<String>,
            parameters: impl Into<models::AvailabilitySet>,
            subscription_id: impl Into<String>,
        ) -> create_or_update::Builder {
            create_or_update::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                availability_set_name: availability_set_name.into(),
                parameters: parameters.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Updates (patches) an availability set.
        pub fn update(
            &self,
            resource_group_name: impl Into<String>,
            availability_set_name: impl Into<String>,
            parameters: impl Into<models::AvailabilitySetUpdate>,
            subscription_id: impl Into<String>,
        ) -> update::Builder {
            update::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                availability_set_name: availability_set_name.into(),
                parameters: parameters.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Deletes an availability set.
        pub fn delete(
            &self,
            resource_group_name: impl Into<String>,
            availability_set_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> delete::Builder {
            delete::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                availability_set_name: availability_set_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Lists all availability sets in a subscription; `$expand` can be set on the builder.
        pub fn list_by_subscription(&self, subscription_id: impl Into<String>) -> list_by_subscription::Builder {
            list_by_subscription::Builder {
                client: self.0.clone(),
                subscription_id: subscription_id.into(),
                expand: None,
            }
        }
        /// Lists all availability sets in a resource group.
        pub fn list(&self, resource_group_name: impl Into<String>, subscription_id: impl Into<String>) -> list::Builder {
            list::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Lists the virtual machine sizes available inside an availability set.
        pub fn list_available_sizes(
            &self,
            resource_group_name: impl Into<String>,
            availability_set_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> list_available_sizes::Builder {
            list_available_sizes::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                availability_set_name: availability_set_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
    }
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) availability_set_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::AvailabilitySet, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.availability_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::AvailabilitySet =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) availability_set_name: String,
pub(crate) parameters: models::AvailabilitySet,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::AvailabilitySet, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.availability_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::AvailabilitySet =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod update {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) availability_set_name: String,
pub(crate) parameters: models::AvailabilitySetUpdate,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::AvailabilitySet, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.availability_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::AvailabilitySet =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) availability_set_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.availability_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list_by_subscription {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) expand: Option<String>,
}
impl Builder {
pub fn expand(mut self, expand: impl Into<String>) -> Self {
self.expand = Some(expand.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::AvailabilitySetListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/availabilitySets",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(expand) = &self.expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::AvailabilitySetListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::AvailabilitySetListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::AvailabilitySetListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list_available_sizes {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) availability_set_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineSizeListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets/{}/vmSizes",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.availability_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineSizeListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod proximity_placement_groups {
use super::models;
    /// Client for the proximity-placement-group operation group; wraps the service client.
    pub struct Client(pub(crate) super::Client);
    impl Client {
        /// Retrieves a proximity placement group; `includeColocationStatus` can be set on the builder.
        pub fn get(
            &self,
            resource_group_name: impl Into<String>,
            proximity_placement_group_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> get::Builder {
            get::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                proximity_placement_group_name: proximity_placement_group_name.into(),
                subscription_id: subscription_id.into(),
                include_colocation_status: None,
            }
        }
        /// Creates or updates a proximity placement group.
        pub fn create_or_update(
            &self,
            resource_group_name: impl Into<String>,
            proximity_placement_group_name: impl Into<String>,
            parameters: impl Into<models::ProximityPlacementGroup>,
            subscription_id: impl Into<String>,
        ) -> create_or_update::Builder {
            create_or_update::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                proximity_placement_group_name: proximity_placement_group_name.into(),
                parameters: parameters.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Updates (patches) a proximity placement group.
        pub fn update(
            &self,
            resource_group_name: impl Into<String>,
            proximity_placement_group_name: impl Into<String>,
            parameters: impl Into<models::ProximityPlacementGroupUpdate>,
            subscription_id: impl Into<String>,
        ) -> update::Builder {
            update::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                proximity_placement_group_name: proximity_placement_group_name.into(),
                parameters: parameters.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Deletes a proximity placement group.
        pub fn delete(
            &self,
            resource_group_name: impl Into<String>,
            proximity_placement_group_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> delete::Builder {
            delete::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                proximity_placement_group_name: proximity_placement_group_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Lists all proximity placement groups in a subscription.
        pub fn list_by_subscription(&self, subscription_id: impl Into<String>) -> list_by_subscription::Builder {
            list_by_subscription::Builder {
                client: self.0.clone(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Lists all proximity placement groups in a resource group.
        pub fn list_by_resource_group(
            &self,
            resource_group_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> list_by_resource_group::Builder {
            list_by_resource_group::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
    }
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) proximity_placement_group_name: String,
pub(crate) subscription_id: String,
pub(crate) include_colocation_status: Option<String>,
}
impl Builder {
pub fn include_colocation_status(mut self, include_colocation_status: impl Into<String>) -> Self {
self.include_colocation_status = Some(include_colocation_status.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ProximityPlacementGroup, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/proximityPlacementGroups/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.proximity_placement_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(include_colocation_status) = &self.include_colocation_status {
url.query_pairs_mut()
.append_pair("includeColocationStatus", include_colocation_status);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProximityPlacementGroup =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::ProximityPlacementGroup),
Created201(models::ProximityPlacementGroup),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) proximity_placement_group_name: String,
pub(crate) parameters: models::ProximityPlacementGroup,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/proximityPlacementGroups/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.proximity_placement_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProximityPlacementGroup =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProximityPlacementGroup =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod update {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) proximity_placement_group_name: String,
pub(crate) parameters: models::ProximityPlacementGroupUpdate,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ProximityPlacementGroup, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/proximityPlacementGroups/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.proximity_placement_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProximityPlacementGroup =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) proximity_placement_group_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/proximityPlacementGroups/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.proximity_placement_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list_by_subscription {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::ProximityPlacementGroupListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/proximityPlacementGroups",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProximityPlacementGroupListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list_by_resource_group {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::ProximityPlacementGroupListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/proximityPlacementGroups",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProximityPlacementGroupListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod dedicated_host_groups {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
resource_group_name: impl Into<String>,
host_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
host_group_name: host_group_name.into(),
subscription_id: subscription_id.into(),
expand: None,
}
}
pub fn create_or_update(
&self,
resource_group_name: impl Into<String>,
host_group_name: impl Into<String>,
parameters: impl Into<models::DedicatedHostGroup>,
subscription_id: impl Into<String>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
host_group_name: host_group_name.into(),
parameters: parameters.into(),
subscription_id: subscription_id.into(),
}
}
pub fn update(
&self,
resource_group_name: impl Into<String>,
host_group_name: impl Into<String>,
parameters: impl Into<models::DedicatedHostGroupUpdate>,
subscription_id: impl Into<String>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
host_group_name: host_group_name.into(),
parameters: parameters.into(),
subscription_id: subscription_id.into(),
}
}
pub fn delete(
&self,
resource_group_name: impl Into<String>,
host_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
host_group_name: host_group_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn list_by_resource_group(
&self,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> list_by_resource_group::Builder {
list_by_resource_group::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn list_by_subscription(&self, subscription_id: impl Into<String>) -> list_by_subscription::Builder {
list_by_subscription::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) host_group_name: String,
pub(crate) subscription_id: String,
pub(crate) expand: Option<String>,
}
impl Builder {
pub fn expand(mut self, expand: impl Into<String>) -> Self {
self.expand = Some(expand.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::DedicatedHostGroup, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/hostGroups/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.host_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(expand) = &self.expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHostGroup =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::DedicatedHostGroup),
Created201(models::DedicatedHostGroup),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) host_group_name: String,
pub(crate) parameters: models::DedicatedHostGroup,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/hostGroups/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.host_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHostGroup =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHostGroup =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod update {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) host_group_name: String,
pub(crate) parameters: models::DedicatedHostGroupUpdate,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::DedicatedHostGroup, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/hostGroups/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.host_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHostGroup =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) host_group_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/hostGroups/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.host_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list_by_resource_group {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::DedicatedHostGroupListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/hostGroups",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHostGroupListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list_by_subscription {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::DedicatedHostGroupListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/hostGroups",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHostGroupListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod dedicated_hosts {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
resource_group_name: impl Into<String>,
host_group_name: impl Into<String>,
host_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
host_group_name: host_group_name.into(),
host_name: host_name.into(),
subscription_id: subscription_id.into(),
expand: None,
}
}
pub fn create_or_update(
&self,
resource_group_name: impl Into<String>,
host_group_name: impl Into<String>,
host_name: impl Into<String>,
parameters: impl Into<models::DedicatedHost>,
subscription_id: impl Into<String>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
host_group_name: host_group_name.into(),
host_name: host_name.into(),
parameters: parameters.into(),
subscription_id: subscription_id.into(),
}
}
pub fn update(
&self,
resource_group_name: impl Into<String>,
host_group_name: impl Into<String>,
host_name: impl Into<String>,
parameters: impl Into<models::DedicatedHostUpdate>,
subscription_id: impl Into<String>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
host_group_name: host_group_name.into(),
host_name: host_name.into(),
parameters: parameters.into(),
subscription_id: subscription_id.into(),
}
}
pub fn delete(
&self,
resource_group_name: impl Into<String>,
host_group_name: impl Into<String>,
host_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
host_group_name: host_group_name.into(),
host_name: host_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn list_by_host_group(
&self,
resource_group_name: impl Into<String>,
host_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> list_by_host_group::Builder {
list_by_host_group::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
host_group_name: host_group_name.into(),
subscription_id: subscription_id.into(),
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) host_group_name: String,
pub(crate) host_name: String,
pub(crate) subscription_id: String,
pub(crate) expand: Option<String>,
}
impl Builder {
pub fn expand(mut self, expand: impl Into<String>) -> Self {
self.expand = Some(expand.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::DedicatedHost, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/hostGroups/{}/hosts/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.host_group_name,
&self.host_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(expand) = &self.expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHost =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::DedicatedHost),
Created201(models::DedicatedHost),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) host_group_name: String,
pub(crate) host_name: String,
pub(crate) parameters: models::DedicatedHost,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/hostGroups/{}/hosts/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.host_group_name,
&self.host_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHost =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHost =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod update {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) host_group_name: String,
pub(crate) host_name: String,
pub(crate) parameters: models::DedicatedHostUpdate,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::DedicatedHost, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/hostGroups/{}/hosts/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.host_group_name,
&self.host_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHost =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) host_group_name: String,
pub(crate) host_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/hostGroups/{}/hosts/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.host_group_name,
&self.host_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list_by_host_group {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) host_group_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::DedicatedHostListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/hostGroups/{}/hosts",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.host_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHostListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod ssh_public_keys {
use super::models;
    /// Operation-group client for SSH public key resources; wraps the service
    /// `Client` and hands out one request builder per operation.
    pub struct Client(pub(crate) super::Client);
    impl Client {
        /// Builder for listing SSH public keys across a subscription.
        pub fn list_by_subscription(&self, subscription_id: impl Into<String>) -> list_by_subscription::Builder {
            list_by_subscription::Builder {
                client: self.0.clone(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Builder for listing SSH public keys in one resource group.
        pub fn list_by_resource_group(
            &self,
            resource_group_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> list_by_resource_group::Builder {
            list_by_resource_group::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Builder for fetching a single SSH public key resource.
        pub fn get(
            &self,
            resource_group_name: impl Into<String>,
            ssh_public_key_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> get::Builder {
            get::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                ssh_public_key_name: ssh_public_key_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Builder for creating an SSH public key resource from `parameters`.
        pub fn create(
            &self,
            resource_group_name: impl Into<String>,
            ssh_public_key_name: impl Into<String>,
            parameters: impl Into<models::SshPublicKeyResource>,
            subscription_id: impl Into<String>,
        ) -> create::Builder {
            create::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                ssh_public_key_name: ssh_public_key_name.into(),
                parameters: parameters.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Builder for updating an existing SSH public key resource.
        pub fn update(
            &self,
            resource_group_name: impl Into<String>,
            ssh_public_key_name: impl Into<String>,
            parameters: impl Into<models::SshPublicKeyUpdateResource>,
            subscription_id: impl Into<String>,
        ) -> update::Builder {
            update::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                ssh_public_key_name: ssh_public_key_name.into(),
                parameters: parameters.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Builder for deleting an SSH public key resource.
        pub fn delete(
            &self,
            resource_group_name: impl Into<String>,
            ssh_public_key_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> delete::Builder {
            delete::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                ssh_public_key_name: ssh_public_key_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Builder for the generateKeyPair action on an SSH public key resource.
        pub fn generate_key_pair(
            &self,
            resource_group_name: impl Into<String>,
            ssh_public_key_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> generate_key_pair::Builder {
            generate_key_pair::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                ssh_public_key_name: ssh_public_key_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
    }
pub mod list_by_subscription {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::SshPublicKeysGroupListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/sshPublicKeys",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::SshPublicKeysGroupListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list_by_resource_group {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::SshPublicKeysGroupListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/sshPublicKeys",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::SshPublicKeysGroupListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) ssh_public_key_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::SshPublicKeyResource, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/sshPublicKeys/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.ssh_public_key_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::SshPublicKeyResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod create {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::SshPublicKeyResource),
Created201(models::SshPublicKeyResource),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) ssh_public_key_name: String,
pub(crate) parameters: models::SshPublicKeyResource,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/sshPublicKeys/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.ssh_public_key_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::SshPublicKeyResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::SshPublicKeyResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod update {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) ssh_public_key_name: String,
pub(crate) parameters: models::SshPublicKeyUpdateResource,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::SshPublicKeyResource, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/sshPublicKeys/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.ssh_public_key_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::SshPublicKeyResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) ssh_public_key_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/sshPublicKeys/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.ssh_public_key_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
    /// Operation module for `POST .../sshPublicKeys/{name}/generateKeyPair`:
    /// asks the service to generate a new SSH key pair for an existing SSH public key resource.
    pub mod generate_key_pair {
        use super::models;
        /// Everything that can go wrong while building, sending, or decoding this request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::StreamError),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Captures the path parameters for the request; consumed by `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_group_name: String,
            pub(crate) ssh_public_key_name: String,
            pub(crate) subscription_id: String,
        }
        impl Builder {
            /// Sends the request and deserializes the 200 response into
            /// `SshPublicKeyGenerateKeyPairResult`; any other status is `Error::UnexpectedResponse`.
            pub fn into_future(
                self,
            ) -> futures::future::BoxFuture<'static, std::result::Result<models::SshPublicKeyGenerateKeyPairResult, Error>> {
                Box::pin(async move {
                    // Path parameters are interpolated directly; the api-version is added as a query pair below.
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/sshPublicKeys/{}/generateKeyPair",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.ssh_public_key_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::POST);
                    // Acquire a bearer token scoped to this client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                    let req_body = azure_core::EMPTY_BODY;
                    // POST with an empty body: an explicit Content-Length of 0 is set for the service.
                    req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::SshPublicKeyGenerateKeyPairResult =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        status_code => {
                            // Body is preserved in the error so callers can inspect the service's message.
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            Err(Error::UnexpectedResponse {
                                status_code,
                                body: rsp_body,
                            })
                        }
                    }
                })
            }
        }
    }
}
pub mod virtual_machine_extension_images {
use super::models;
    /// Entry point for virtual-machine extension image operations; wraps the shared service client.
    pub struct Client(pub(crate) super::Client);
    impl Client {
        /// Gets a specific extension image version. Returns a builder; call `.into_future()` to send.
        pub fn get(
            &self,
            location: impl Into<String>,
            publisher_name: impl Into<String>,
            type_: impl Into<String>,
            version: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> get::Builder {
            get::Builder {
                client: self.0.clone(),
                location: location.into(),
                publisher_name: publisher_name.into(),
                type_: type_.into(),
                version: version.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Lists extension image types for a publisher in a location.
        pub fn list_types(
            &self,
            location: impl Into<String>,
            publisher_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> list_types::Builder {
            list_types::Builder {
                client: self.0.clone(),
                location: location.into(),
                publisher_name: publisher_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Lists versions of an extension image type; `$filter`, `$top` and `$orderby`
        /// are optional and settable on the returned builder.
        pub fn list_versions(
            &self,
            location: impl Into<String>,
            publisher_name: impl Into<String>,
            type_: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> list_versions::Builder {
            list_versions::Builder {
                client: self.0.clone(),
                location: location.into(),
                publisher_name: publisher_name.into(),
                type_: type_.into(),
                subscription_id: subscription_id.into(),
                filter: None,
                top: None,
                orderby: None,
            }
        }
    }
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) publisher_name: String,
pub(crate) type_: String,
pub(crate) version: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineExtensionImage, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmextension/types/{}/versions/{}" , self . client . endpoint () , & self . subscription_id , & self . location , & self . publisher_name , & self . type_ , & self . version) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineExtensionImage =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
    /// Operation module for `GET .../artifacttypes/vmextension/types`:
    /// lists the extension image types available from a publisher in a location.
    pub mod list_types {
        use super::models;
        /// Everything that can go wrong while building, sending, or decoding this request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::StreamError),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Captures the path parameters for the request; consumed by `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) location: String,
            pub(crate) publisher_name: String,
            pub(crate) subscription_id: String,
        }
        impl Builder {
            /// Sends the request and deserializes the 200 response into a flat
            /// `Vec<VirtualMachineExtensionImage>`; any other status is `Error::UnexpectedResponse`.
            pub fn into_future(
                self,
            ) -> futures::future::BoxFuture<'static, std::result::Result<Vec<models::VirtualMachineExtensionImage>, Error>> {
                Box::pin(async move {
                    // Path parameters are interpolated directly; the api-version is added as a query pair below.
                    let url_str = &format!(
                        "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmextension/types",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.location,
                        &self.publisher_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::GET);
                    // Acquire a bearer token scoped to this client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: Vec<models::VirtualMachineExtensionImage> =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        status_code => {
                            // Body is preserved in the error so callers can inspect the service's message.
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            Err(Error::UnexpectedResponse {
                                status_code,
                                body: rsp_body,
                            })
                        }
                    }
                })
            }
        }
    }
pub mod list_versions {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) publisher_name: String,
pub(crate) type_: String,
pub(crate) subscription_id: String,
pub(crate) filter: Option<String>,
pub(crate) top: Option<i32>,
pub(crate) orderby: Option<String>,
}
impl Builder {
pub fn filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn top(mut self, top: i32) -> Self {
self.top = Some(top);
self
}
pub fn orderby(mut self, orderby: impl Into<String>) -> Self {
self.orderby = Some(orderby.into());
self
}
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<Vec<models::VirtualMachineExtensionImage>, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmextension/types/{}/versions" , self . client . endpoint () , & self . subscription_id , & self . location , & self . publisher_name , & self . type_) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(filter) = &self.filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(top) = &self.top {
url.query_pairs_mut().append_pair("$top", &top.to_string());
}
if let Some(orderby) = &self.orderby {
url.query_pairs_mut().append_pair("$orderby", orderby);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: Vec<models::VirtualMachineExtensionImage> =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod virtual_machine_extensions {
use super::models;
    /// Entry point for virtual-machine extension operations; wraps the shared service client.
    pub struct Client(pub(crate) super::Client);
    impl Client {
        /// Gets one extension on a VM; `$expand` is optional and settable on the returned builder.
        pub fn get(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            vm_extension_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> get::Builder {
            get::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                vm_extension_name: vm_extension_name.into(),
                subscription_id: subscription_id.into(),
                expand: None,
            }
        }
        /// Creates or fully replaces an extension on a VM (PUT semantics).
        pub fn create_or_update(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            vm_extension_name: impl Into<String>,
            extension_parameters: impl Into<models::VirtualMachineExtension>,
            subscription_id: impl Into<String>,
        ) -> create_or_update::Builder {
            create_or_update::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                vm_extension_name: vm_extension_name.into(),
                extension_parameters: extension_parameters.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Partially updates an extension on a VM (PATCH semantics).
        pub fn update(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            vm_extension_name: impl Into<String>,
            extension_parameters: impl Into<models::VirtualMachineExtensionUpdate>,
            subscription_id: impl Into<String>,
        ) -> update::Builder {
            update::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                vm_extension_name: vm_extension_name.into(),
                extension_parameters: extension_parameters.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Deletes an extension from a VM.
        pub fn delete(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            vm_extension_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> delete::Builder {
            delete::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                vm_extension_name: vm_extension_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Lists all extensions on a VM; `$expand` is optional and settable on the returned builder.
        pub fn list(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> list::Builder {
            list::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                subscription_id: subscription_id.into(),
                expand: None,
            }
        }
    }
    /// Operation module for `GET .../virtualMachines/{vm}/extensions/{ext}`:
    /// fetches a single virtual-machine extension, optionally expanded.
    pub mod get {
        use super::models;
        /// Everything that can go wrong while building, sending, or decoding this request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::StreamError),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Captures the path parameters plus the optional `$expand` query option;
        /// consumed by `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_group_name: String,
            pub(crate) vm_name: String,
            pub(crate) vm_extension_name: String,
            pub(crate) subscription_id: String,
            pub(crate) expand: Option<String>,
        }
        impl Builder {
            /// Sets the `$expand` OData query option.
            pub fn expand(mut self, expand: impl Into<String>) -> Self {
                self.expand = Some(expand.into());
                self
            }
            /// Sends the request and deserializes the 200 response into
            /// `VirtualMachineExtension`; any other status is `Error::UnexpectedResponse`.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineExtension, Error>> {
                Box::pin(async move {
                    // Path parameters are interpolated directly; query options are appended below.
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/extensions/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vm_name,
                        &self.vm_extension_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::GET);
                    // Acquire a bearer token scoped to this client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                    // The optional $expand query option is only emitted when set.
                    if let Some(expand) = &self.expand {
                        url.query_pairs_mut().append_pair("$expand", expand);
                    }
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::VirtualMachineExtension =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        status_code => {
                            // Body is preserved in the error so callers can inspect the service's message.
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            Err(Error::UnexpectedResponse {
                                status_code,
                                body: rsp_body,
                            })
                        }
                    }
                })
            }
        }
    }
    /// Operation module for `PUT .../virtualMachines/{vm}/extensions/{ext}`:
    /// creates a new extension or fully replaces an existing one.
    pub mod create_or_update {
        use super::models;
        /// Success outcomes: 200 means an existing extension was replaced,
        /// 201 means a new one was created; both carry the resulting resource.
        #[derive(Debug)]
        pub enum Response {
            Ok200(models::VirtualMachineExtension),
            Created201(models::VirtualMachineExtension),
        }
        /// Everything that can go wrong while building, sending, or decoding this request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::StreamError),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Captures the path parameters and the request payload; consumed by `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_group_name: String,
            pub(crate) vm_name: String,
            pub(crate) vm_extension_name: String,
            pub(crate) extension_parameters: models::VirtualMachineExtension,
            pub(crate) subscription_id: String,
        }
        impl Builder {
            /// Serializes `extension_parameters` as JSON, sends the PUT, and maps
            /// 200/201 to `Response`; any other status is `Error::UnexpectedResponse`.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
                Box::pin(async move {
                    // Path parameters are interpolated directly; the api-version is added as a query pair below.
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/extensions/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vm_name,
                        &self.vm_extension_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::PUT);
                    // Acquire a bearer token scoped to this client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                    // JSON body: the full extension definition to create or replace.
                    req_builder = req_builder.header("content-type", "application/json");
                    let req_body = azure_core::to_json(&self.extension_parameters).map_err(Error::Serialize)?;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::VirtualMachineExtension =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(Response::Ok200(rsp_value))
                        }
                        http::StatusCode::CREATED => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::VirtualMachineExtension =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(Response::Created201(rsp_value))
                        }
                        status_code => {
                            // Body is preserved in the error so callers can inspect the service's message.
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            Err(Error::UnexpectedResponse {
                                status_code,
                                body: rsp_body,
                            })
                        }
                    }
                })
            }
        }
    }
    /// Operation module for `PATCH .../virtualMachines/{vm}/extensions/{ext}`:
    /// partially updates an existing virtual-machine extension.
    pub mod update {
        use super::models;
        /// Everything that can go wrong while building, sending, or decoding this request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::StreamError),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Captures the path parameters and the patch payload; consumed by `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_group_name: String,
            pub(crate) vm_name: String,
            pub(crate) vm_extension_name: String,
            pub(crate) extension_parameters: models::VirtualMachineExtensionUpdate,
            pub(crate) subscription_id: String,
        }
        impl Builder {
            /// Serializes `extension_parameters` as JSON, sends the PATCH, and deserializes
            /// the 200 response into `VirtualMachineExtension`; any other status is
            /// `Error::UnexpectedResponse`.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineExtension, Error>> {
                Box::pin(async move {
                    // Path parameters are interpolated directly; the api-version is added as a query pair below.
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/extensions/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vm_name,
                        &self.vm_extension_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::PATCH);
                    // Acquire a bearer token scoped to this client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                    // JSON body: only the fields being updated.
                    req_builder = req_builder.header("content-type", "application/json");
                    let req_body = azure_core::to_json(&self.extension_parameters).map_err(Error::Serialize)?;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::VirtualMachineExtension =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        status_code => {
                            // Body is preserved in the error so callers can inspect the service's message.
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            Err(Error::UnexpectedResponse {
                                status_code,
                                body: rsp_body,
                            })
                        }
                    }
                })
            }
        }
    }
    /// Operation module for `DELETE .../virtualMachines/{vm}/extensions/{ext}`:
    /// removes an extension from a virtual machine.
    pub mod delete {
        use super::models;
        /// Success outcomes: 200 = deleted, 202 = deletion accepted (long-running),
        /// 204 = the extension did not exist. No body is returned for any of them.
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        /// Everything that can go wrong while building, sending, or decoding this request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::StreamError),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Captures the path parameters for the request; consumed by `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_group_name: String,
            pub(crate) vm_name: String,
            pub(crate) vm_extension_name: String,
            pub(crate) subscription_id: String,
        }
        impl Builder {
            /// Sends the DELETE and maps 200/202/204 to `Response`; any other status
            /// is `Error::UnexpectedResponse`.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
                Box::pin(async move {
                    // Path parameters are interpolated directly; the api-version is added as a query pair below.
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/extensions/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vm_name,
                        &self.vm_extension_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::DELETE);
                    // Acquire a bearer token scoped to this client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => Ok(Response::Ok200),
                        http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
                        http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
                        status_code => {
                            // Body is preserved in the error so callers can inspect the service's message.
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            Err(Error::UnexpectedResponse {
                                status_code,
                                body: rsp_body,
                            })
                        }
                    }
                })
            }
        }
    }
    /// Operation module for `GET .../virtualMachines/{vm}/extensions`:
    /// lists all extensions on a virtual machine, optionally expanded.
    pub mod list {
        use super::models;
        /// Everything that can go wrong while building, sending, or decoding this request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::StreamError),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Captures the path parameters plus the optional `$expand` query option;
        /// consumed by `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_group_name: String,
            pub(crate) vm_name: String,
            pub(crate) subscription_id: String,
            pub(crate) expand: Option<String>,
        }
        impl Builder {
            /// Sets the `$expand` OData query option.
            pub fn expand(mut self, expand: impl Into<String>) -> Self {
                self.expand = Some(expand.into())
                ;
                self
            }
            /// Sends the request and deserializes the 200 response into
            /// `VirtualMachineExtensionsListResult`; any other status is `Error::UnexpectedResponse`.
            pub fn into_future(
                self,
            ) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineExtensionsListResult, Error>> {
                Box::pin(async move {
                    // Path parameters are interpolated directly; query options are appended below.
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/extensions",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vm_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::GET);
                    // Acquire a bearer token scoped to this client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                    // The optional $expand query option is only emitted when set.
                    if let Some(expand) = &self.expand {
                        url.query_pairs_mut().append_pair("$expand", expand);
                    }
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::VirtualMachineExtensionsListResult =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        status_code => {
                            // Body is preserved in the error so callers can inspect the service's message.
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            Err(Error::UnexpectedResponse {
                                status_code,
                                body: rsp_body,
                            })
                        }
                    }
                })
            }
        }
    }
}
pub mod virtual_machine_images {
use super::models;
    /// Entry point for virtual-machine image catalog operations; wraps the shared service client.
    pub struct Client(pub(crate) super::Client);
    impl Client {
        /// Gets one VM image version from the publisher/offer/SKU hierarchy.
        pub fn get(
            &self,
            location: impl Into<String>,
            publisher_name: impl Into<String>,
            offer: impl Into<String>,
            skus: impl Into<String>,
            version: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> get::Builder {
            get::Builder {
                client: self.0.clone(),
                location: location.into(),
                publisher_name: publisher_name.into(),
                offer: offer.into(),
                skus: skus.into(),
                version: version.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Lists image versions for a SKU; `$expand`, `$top` and `$orderby`
        /// are optional and settable on the returned builder.
        pub fn list(
            &self,
            location: impl Into<String>,
            publisher_name: impl Into<String>,
            offer: impl Into<String>,
            skus: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> list::Builder {
            list::Builder {
                client: self.0.clone(),
                location: location.into(),
                publisher_name: publisher_name.into(),
                offer: offer.into(),
                skus: skus.into(),
                subscription_id: subscription_id.into(),
                expand: None,
                top: None,
                orderby: None,
            }
        }
        /// Lists image offers for a publisher in a location.
        pub fn list_offers(
            &self,
            location: impl Into<String>,
            publisher_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> list_offers::Builder {
            list_offers::Builder {
                client: self.0.clone(),
                location: location.into(),
                publisher_name: publisher_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Lists image publishers for a location.
        pub fn list_publishers(&self, location: impl Into<String>, subscription_id: impl Into<String>) -> list_publishers::Builder {
            list_publishers::Builder {
                client: self.0.clone(),
                location: location.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Lists image SKUs for a publisher's offer in a location.
        pub fn list_skus(
            &self,
            location: impl Into<String>,
            publisher_name: impl Into<String>,
            offer: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> list_skus::Builder {
            list_skus::Builder {
                client: self.0.clone(),
                location: location.into(),
                publisher_name: publisher_name.into(),
                offer: offer.into(),
                subscription_id: subscription_id.into(),
            }
        }
    }
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) publisher_name: String,
pub(crate) offer: String,
pub(crate) skus: String,
pub(crate) version: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineImage, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmimage/offers/{}/skus/{}/versions/{}" , self . client . endpoint () , & self . subscription_id , & self . location , & self . publisher_name , & self . offer , & self . skus , & self . version) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineImage =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) publisher_name: String,
pub(crate) offer: String,
pub(crate) skus: String,
pub(crate) subscription_id: String,
pub(crate) expand: Option<String>,
pub(crate) top: Option<i32>,
pub(crate) orderby: Option<String>,
}
impl Builder {
pub fn expand(mut self, expand: impl Into<String>) -> Self {
self.expand = Some(expand.into());
self
}
pub fn top(mut self, top: i32) -> Self {
self.top = Some(top);
self
}
pub fn orderby(mut self, orderby: impl Into<String>) -> Self {
self.orderby = Some(orderby.into());
self
}
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<Vec<models::VirtualMachineImageResource>, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmimage/offers/{}/skus/{}/versions" , self . client . endpoint () , & self . subscription_id , & self . location , & self . publisher_name , & self . offer , & self . skus) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(expand) = &self.expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
if let Some(top) = &self.top {
url.query_pairs_mut().append_pair("$top", &top.to_string());
}
if let Some(orderby) = &self.orderby {
url.query_pairs_mut().append_pair("$orderby", orderby);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: Vec<models::VirtualMachineImageResource> =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list_offers {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) publisher_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<Vec<models::VirtualMachineImageResource>, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmimage/offers",
self.client.endpoint(),
&self.subscription_id,
&self.location,
&self.publisher_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: Vec<models::VirtualMachineImageResource> =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list_publishers {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<Vec<models::VirtualMachineImageResource>, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers",
self.client.endpoint(),
&self.subscription_id,
&self.location
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: Vec<models::VirtualMachineImageResource> =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list_skus {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) publisher_name: String,
pub(crate) offer: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<Vec<models::VirtualMachineImageResource>, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmimage/offers/{}/skus",
self.client.endpoint(),
&self.subscription_id,
&self.location,
&self.publisher_name,
&self.offer
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: Vec<models::VirtualMachineImageResource> =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod virtual_machine_images_edge_zone {
use super::models;
/// Operation-group client for edge-zone virtual machine image queries;
/// wraps the shared service client.
pub struct Client(pub(crate) super::Client);
impl Client {
    /// Builds a request that fetches a single virtual machine image from an edge zone.
    pub fn get(
        &self,
        location: impl Into<String>,
        edge_zone: impl Into<String>,
        publisher_name: impl Into<String>,
        offer: impl Into<String>,
        skus: impl Into<String>,
        version: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> get::Builder {
        let client = self.0.clone();
        let location = location.into();
        let edge_zone = edge_zone.into();
        let publisher_name = publisher_name.into();
        let offer = offer.into();
        let skus = skus.into();
        let version = version.into();
        let subscription_id = subscription_id.into();
        get::Builder {
            client,
            location,
            edge_zone,
            publisher_name,
            offer,
            skus,
            version,
            subscription_id,
        }
    }
    /// Builds a request that lists virtual machine image versions in an edge zone.
    /// OData query options start unset and can be supplied on the returned builder.
    pub fn list(
        &self,
        location: impl Into<String>,
        edge_zone: impl Into<String>,
        publisher_name: impl Into<String>,
        offer: impl Into<String>,
        skus: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> list::Builder {
        let client = self.0.clone();
        let location = location.into();
        let edge_zone = edge_zone.into();
        let publisher_name = publisher_name.into();
        let offer = offer.into();
        let skus = skus.into();
        let subscription_id = subscription_id.into();
        list::Builder {
            client,
            location,
            edge_zone,
            publisher_name,
            offer,
            skus,
            subscription_id,
            expand: None,
            top: None,
            orderby: None,
        }
    }
    /// Builds a request that lists image offers for a publisher in an edge zone.
    pub fn list_offers(
        &self,
        location: impl Into<String>,
        edge_zone: impl Into<String>,
        publisher_name: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> list_offers::Builder {
        let client = self.0.clone();
        let location = location.into();
        let edge_zone = edge_zone.into();
        let publisher_name = publisher_name.into();
        let subscription_id = subscription_id.into();
        list_offers::Builder {
            client,
            location,
            edge_zone,
            publisher_name,
            subscription_id,
        }
    }
    /// Builds a request that lists image publishers in an edge zone.
    pub fn list_publishers(
        &self,
        location: impl Into<String>,
        edge_zone: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> list_publishers::Builder {
        let client = self.0.clone();
        let location = location.into();
        let edge_zone = edge_zone.into();
        let subscription_id = subscription_id.into();
        list_publishers::Builder {
            client,
            location,
            edge_zone,
            subscription_id,
        }
    }
    /// Builds a request that lists image SKUs for a publisher and offer in an edge zone.
    pub fn list_skus(
        &self,
        location: impl Into<String>,
        edge_zone: impl Into<String>,
        publisher_name: impl Into<String>,
        offer: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> list_skus::Builder {
        let client = self.0.clone();
        let location = location.into();
        let edge_zone = edge_zone.into();
        let publisher_name = publisher_name.into();
        let offer = offer.into();
        let subscription_id = subscription_id.into();
        list_skus::Builder {
            client,
            location,
            edge_zone,
            publisher_name,
            offer,
            subscription_id,
        }
    }
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) edge_zone: String,
pub(crate) publisher_name: String,
pub(crate) offer: String,
pub(crate) skus: String,
pub(crate) version: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineImage, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/edgeZones/{}/publishers/{}/artifacttypes/vmimage/offers/{}/skus/{}/versions/{}" , self . client . endpoint () , & self . subscription_id , & self . location , & self . edge_zone , & self . publisher_name , & self . offer , & self . skus , & self . version) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineImage =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) edge_zone: String,
pub(crate) publisher_name: String,
pub(crate) offer: String,
pub(crate) skus: String,
pub(crate) subscription_id: String,
pub(crate) expand: Option<String>,
pub(crate) top: Option<i32>,
pub(crate) orderby: Option<String>,
}
impl Builder {
pub fn expand(mut self, expand: impl Into<String>) -> Self {
self.expand = Some(expand.into());
self
}
pub fn top(mut self, top: i32) -> Self {
self.top = Some(top);
self
}
pub fn orderby(mut self, orderby: impl Into<String>) -> Self {
self.orderby = Some(orderby.into());
self
}
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<Vec<models::VirtualMachineImageResource>, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/edgeZones/{}/publishers/{}/artifacttypes/vmimage/offers/{}/skus/{}/versions" , self . client . endpoint () , & self . subscription_id , & self . location , & self . edge_zone , & self . publisher_name , & self . offer , & self . skus) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(expand) = &self.expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
if let Some(top) = &self.top {
url.query_pairs_mut().append_pair("$top", &top.to_string());
}
if let Some(orderby) = &self.orderby {
url.query_pairs_mut().append_pair("$orderby", orderby);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: Vec<models::VirtualMachineImageResource> =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_offers {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) edge_zone: String,
pub(crate) publisher_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<Vec<models::VirtualMachineImageResource>, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/edgeZones/{}/publishers/{}/artifacttypes/vmimage/offers" , self . client . endpoint () , & self . subscription_id , & self . location , & self . edge_zone , & self . publisher_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: Vec<models::VirtualMachineImageResource> =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_publishers {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) edge_zone: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<Vec<models::VirtualMachineImageResource>, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/edgeZones/{}/publishers",
self.client.endpoint(),
&self.subscription_id,
&self.location,
&self.edge_zone
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: Vec<models::VirtualMachineImageResource> =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_skus {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) edge_zone: String,
pub(crate) publisher_name: String,
pub(crate) offer: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<Vec<models::VirtualMachineImageResource>, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/edgeZones/{}/publishers/{}/artifacttypes/vmimage/offers/{}/skus" , self . client . endpoint () , & self . subscription_id , & self . location , & self . edge_zone , & self . publisher_name , & self . offer) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: Vec<models::VirtualMachineImageResource> =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod usage {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(&self, location: impl Into<String>, subscription_id: impl Into<String>) -> list::Builder {
list::Builder {
client: self.0.clone(),
location: location.into(),
subscription_id: subscription_id.into(),
}
}
}
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ListUsagesResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/usages",
self.client.endpoint(),
&self.subscription_id,
&self.location
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ListUsagesResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod virtual_machines {
use super::models;
    /// Client for the `virtual_machines` operation group; wraps the shared service client.
    pub struct Client(pub(crate) super::Client);
    impl Client {
        /// Lists all virtual machines in the given location under the subscription.
        pub fn list_by_location(&self, location: impl Into<String>, subscription_id: impl Into<String>) -> list_by_location::Builder {
            list_by_location::Builder {
                client: self.0.clone(),
                location: location.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Captures a VM image from the given virtual machine using the supplied parameters.
        pub fn capture(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            parameters: impl Into<models::VirtualMachineCaptureParameters>,
            subscription_id: impl Into<String>,
        ) -> capture::Builder {
            capture::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                parameters: parameters.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Retrieves a virtual machine; `expand` is optional and defaults to unset.
        pub fn get(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> get::Builder {
            get::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                subscription_id: subscription_id.into(),
                expand: None,
            }
        }
        /// Creates or updates a virtual machine (PUT semantics).
        pub fn create_or_update(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            parameters: impl Into<models::VirtualMachine>,
            subscription_id: impl Into<String>,
        ) -> create_or_update::Builder {
            create_or_update::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                parameters: parameters.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Updates a virtual machine in place (PATCH semantics).
        pub fn update(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            parameters: impl Into<models::VirtualMachineUpdate>,
            subscription_id: impl Into<String>,
        ) -> update::Builder {
            update::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                parameters: parameters.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Deletes a virtual machine; `force_deletion` is optional and defaults to unset.
        pub fn delete(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> delete::Builder {
            delete::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                subscription_id: subscription_id.into(),
                force_deletion: None,
            }
        }
        /// Retrieves the run-time instance view of a virtual machine.
        pub fn instance_view(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> instance_view::Builder {
            instance_view::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Converts the VM's disks from blob-based to managed disks.
        pub fn convert_to_managed_disks(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> convert_to_managed_disks::Builder {
            convert_to_managed_disks::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Deallocates (stops and releases compute resources of) a virtual machine.
        pub fn deallocate(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> deallocate::Builder {
            deallocate::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Marks a virtual machine as generalized.
        pub fn generalize(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> generalize::Builder {
            generalize::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Lists all virtual machines in a resource group.
        pub fn list(&self, resource_group_name: impl Into<String>, subscription_id: impl Into<String>) -> list::Builder {
            list::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Lists all virtual machines in the subscription; `status_only` is optional and defaults to unset.
        pub fn list_all(&self, subscription_id: impl Into<String>) -> list_all::Builder {
            list_all::Builder {
                client: self.0.clone(),
                subscription_id: subscription_id.into(),
                status_only: None,
            }
        }
        /// Lists the VM sizes the given virtual machine can be resized to.
        pub fn list_available_sizes(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> list_available_sizes::Builder {
            list_available_sizes::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Powers off a virtual machine; `skip_shutdown` is optional and defaults to unset.
        pub fn power_off(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> power_off::Builder {
            power_off::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                subscription_id: subscription_id.into(),
                skip_shutdown: None,
            }
        }
        /// Reapplies the virtual machine's state.
        pub fn reapply(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> reapply::Builder {
            reapply::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Restarts a virtual machine.
        pub fn restart(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> restart::Builder {
            restart::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Starts a virtual machine.
        pub fn start(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> start::Builder {
            start::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Redeploys a virtual machine.
        pub fn redeploy(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> redeploy::Builder {
            redeploy::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Reimages a virtual machine; `parameters` is optional and defaults to unset.
        pub fn reimage(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> reimage::Builder {
            reimage::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                subscription_id: subscription_id.into(),
                parameters: None,
            }
        }
        /// Retrieves a SAS URI for boot-diagnostics data; expiration minutes are optional.
        pub fn retrieve_boot_diagnostics_data(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> retrieve_boot_diagnostics_data::Builder {
            retrieve_boot_diagnostics_data::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                subscription_id: subscription_id.into(),
                sas_uri_expiration_time_in_minutes: None,
            }
        }
        /// Performs maintenance on a virtual machine.
        pub fn perform_maintenance(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> perform_maintenance::Builder {
            perform_maintenance::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Simulates eviction of a spot virtual machine.
        pub fn simulate_eviction(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> simulate_eviction::Builder {
            simulate_eviction::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Assesses available patches on a virtual machine.
        pub fn assess_patches(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> assess_patches::Builder {
            assess_patches::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Installs patches on a virtual machine using the supplied parameters.
        pub fn install_patches(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            install_patches_input: impl Into<models::VirtualMachineInstallPatchesParameters>,
            subscription_id: impl Into<String>,
        ) -> install_patches::Builder {
            install_patches::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                install_patches_input: install_patches_input.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Runs a command on a virtual machine using the supplied parameters.
        pub fn run_command(
            &self,
            resource_group_name: impl Into<String>,
            vm_name: impl Into<String>,
            parameters: impl Into<models::RunCommandInput>,
            subscription_id: impl Into<String>,
        ) -> run_command::Builder {
            run_command::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_name: vm_name.into(),
                parameters: parameters.into(),
                subscription_id: subscription_id.into(),
            }
        }
    }
pub mod list_by_location {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/virtualMachines",
self.client.endpoint(),
&self.subscription_id,
&self.location
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod capture {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::VirtualMachineCaptureResult),
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_name: String,
pub(crate) parameters: models::VirtualMachineCaptureParameters,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/capture",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineCaptureResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_name: String,
pub(crate) subscription_id: String,
pub(crate) expand: Option<String>,
}
impl Builder {
pub fn expand(mut self, expand: impl Into<String>) -> Self {
self.expand = Some(expand.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachine, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(expand) = &self.expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachine =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::VirtualMachine),
Created201(models::VirtualMachine),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_name: String,
pub(crate) parameters: models::VirtualMachine,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachine =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachine =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod update {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_name: String,
pub(crate) parameters: models::VirtualMachineUpdate,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachine, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachine =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_name: String,
pub(crate) subscription_id: String,
pub(crate) force_deletion: Option<bool>,
}
impl Builder {
pub fn force_deletion(mut self, force_deletion: bool) -> Self {
self.force_deletion = Some(force_deletion);
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(force_deletion) = &self.force_deletion {
url.query_pairs_mut().append_pair("forceDeletion", &force_deletion.to_string());
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod instance_view {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineInstanceView, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/instanceView",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineInstanceView =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
/// Long-running operation: `POST .../virtualMachines/{vm}/convertToManagedDisks`
/// (api-version 2021-03-01). Sends an empty body; no response body is read on success.
pub mod convert_to_managed_disks {
    use super::models;
    /// Success statuses: 200 = done, 202 = accepted (completes asynchronously).
    #[derive(Debug)]
    pub enum Response {
        Ok200,
        Accepted202,
    }
    /// One variant per failure stage of the request pipeline
    /// (URL parse, build, auth, send, body collection, deserialize).
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("Unexpected HTTP status code {}", status_code)]
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::StreamError),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Holds the client plus the path parameters needed to build the URL.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_group_name: String,
        pub(crate) vm_name: String,
        pub(crate) subscription_id: String,
    }
    impl Builder {
        /// Builds and sends the request; maps 200/202 to `Response`, anything
        /// else to `Error::UnexpectedResponse` with the raw response body.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/convertToManagedDisks",
                    self.client.endpoint(),
                    &self.subscription_id,
                    &self.resource_group_name,
                    &self.vm_name
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::POST);
                // Bearer token scoped to the client's configured scopes.
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                let req_body = azure_core::EMPTY_BODY;
                // Empty-body POST: Content-Length must be set explicitly.
                req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // rsp_headers is intentionally unused for this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => Ok(Response::Ok200),
                    http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        Err(Error::UnexpectedResponse {
                            status_code,
                            body: rsp_body,
                        })
                    }
                }
            })
        }
    }
}
/// Long-running operation: `POST .../virtualMachines/{vm}/deallocate`
/// (api-version 2021-03-01). Empty request body; no response body on success.
pub mod deallocate {
    use super::models;
    /// Success statuses: 200 = done, 202 = accepted (completes asynchronously).
    #[derive(Debug)]
    pub enum Response {
        Ok200,
        Accepted202,
    }
    /// One variant per failure stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("Unexpected HTTP status code {}", status_code)]
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::StreamError),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Holds the client plus the path parameters needed to build the URL.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_group_name: String,
        pub(crate) vm_name: String,
        pub(crate) subscription_id: String,
    }
    impl Builder {
        /// Builds and sends the request; maps 200/202 to `Response`, anything
        /// else to `Error::UnexpectedResponse` with the raw response body.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/deallocate",
                    self.client.endpoint(),
                    &self.subscription_id,
                    &self.resource_group_name,
                    &self.vm_name
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::POST);
                // Bearer token scoped to the client's configured scopes.
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                let req_body = azure_core::EMPTY_BODY;
                // Empty-body POST: Content-Length must be set explicitly.
                req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // rsp_headers is intentionally unused for this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => Ok(Response::Ok200),
                    http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        Err(Error::UnexpectedResponse {
                            status_code,
                            body: rsp_body,
                        })
                    }
                }
            })
        }
    }
}
/// Operation: `POST .../virtualMachines/{vm}/generalize`
/// (api-version 2021-03-01). Empty request body; the only success status is
/// 200, mapped to unit `()` — there is no `Response` enum for this operation.
pub mod generalize {
    use super::models;
    /// One variant per failure stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("Unexpected HTTP status code {}", status_code)]
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::StreamError),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Holds the client plus the path parameters needed to build the URL.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_group_name: String,
        pub(crate) vm_name: String,
        pub(crate) subscription_id: String,
    }
    impl Builder {
        /// Builds and sends the request; 200 maps to `Ok(())`, anything else
        /// to `Error::UnexpectedResponse` carrying the raw response body.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/generalize",
                    self.client.endpoint(),
                    &self.subscription_id,
                    &self.resource_group_name,
                    &self.vm_name
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::POST);
                // Bearer token scoped to the client's configured scopes.
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                let req_body = azure_core::EMPTY_BODY;
                // Empty-body POST: Content-Length must be set explicitly.
                req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // rsp_headers is intentionally unused for this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => Ok(()),
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        Err(Error::UnexpectedResponse {
                            status_code,
                            body: rsp_body,
                        })
                    }
                }
            })
        }
    }
}
/// Operation: `GET .../resourceGroups/{rg}/providers/Microsoft.Compute/virtualMachines`
/// (api-version 2021-03-01). Returns one page of `VirtualMachineListResult`.
/// NOTE(review): no next-link paging is done here; callers presumably follow
/// `next_link` from the result themselves — confirm against the crate's paging helpers.
pub mod list {
    use super::models;
    /// One variant per failure stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("Unexpected HTTP status code {}", status_code)]
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::StreamError),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Holds the client plus the path parameters needed to build the URL.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_group_name: String,
        pub(crate) subscription_id: String,
    }
    impl Builder {
        /// Builds and sends the GET; on 200 deserializes the JSON body into
        /// `models::VirtualMachineListResult`, otherwise returns
        /// `Error::UnexpectedResponse` with the raw body.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineListResult, Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines",
                    self.client.endpoint(),
                    &self.subscription_id,
                    &self.resource_group_name
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::GET);
                // Bearer token scoped to the client's configured scopes.
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                let req_body = azure_core::EMPTY_BODY;
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // rsp_headers is intentionally unused for this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::VirtualMachineListResult =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Ok(rsp_value)
                    }
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        Err(Error::UnexpectedResponse {
                            status_code,
                            body: rsp_body,
                        })
                    }
                }
            })
        }
    }
}
/// Operation: `GET /subscriptions/{sub}/providers/Microsoft.Compute/virtualMachines`
/// (api-version 2021-03-01) — lists VMs across the whole subscription, with an
/// optional `statusOnly` query parameter. Returns one page of results.
pub mod list_all {
    use super::models;
    /// One variant per failure stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("Unexpected HTTP status code {}", status_code)]
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::StreamError),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Holds the client, the subscription, and the optional `statusOnly` filter.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) subscription_id: String,
        pub(crate) status_only: Option<String>,
    }
    impl Builder {
        /// Sets the optional `statusOnly` query parameter.
        pub fn status_only(mut self, status_only: impl Into<String>) -> Self {
            self.status_only = Some(status_only.into());
            self
        }
        /// Builds and sends the GET; on 200 deserializes the JSON body into
        /// `models::VirtualMachineListResult`.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineListResult, Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/subscriptions/{}/providers/Microsoft.Compute/virtualMachines",
                    self.client.endpoint(),
                    &self.subscription_id
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::GET);
                // Bearer token scoped to the client's configured scopes.
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                // Optional filter: only appended when the caller set it.
                if let Some(status_only) = &self.status_only {
                    url.query_pairs_mut().append_pair("statusOnly", status_only);
                }
                let req_body = azure_core::EMPTY_BODY;
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // rsp_headers is intentionally unused for this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::VirtualMachineListResult =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Ok(rsp_value)
                    }
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        Err(Error::UnexpectedResponse {
                            status_code,
                            body: rsp_body,
                        })
                    }
                }
            })
        }
    }
}
/// Operation: `GET .../virtualMachines/{vm}/vmSizes` (api-version 2021-03-01).
/// Returns the sizes the VM can be resized to as `VirtualMachineSizeListResult`.
pub mod list_available_sizes {
    use super::models;
    /// One variant per failure stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("Unexpected HTTP status code {}", status_code)]
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::StreamError),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Holds the client plus the path parameters needed to build the URL.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_group_name: String,
        pub(crate) vm_name: String,
        pub(crate) subscription_id: String,
    }
    impl Builder {
        /// Builds and sends the GET; on 200 deserializes the JSON body into
        /// `models::VirtualMachineSizeListResult`.
        pub fn into_future(
            self,
        ) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineSizeListResult, Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/vmSizes",
                    self.client.endpoint(),
                    &self.subscription_id,
                    &self.resource_group_name,
                    &self.vm_name
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::GET);
                // Bearer token scoped to the client's configured scopes.
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                let req_body = azure_core::EMPTY_BODY;
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // rsp_headers is intentionally unused for this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::VirtualMachineSizeListResult =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Ok(rsp_value)
                    }
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        Err(Error::UnexpectedResponse {
                            status_code,
                            body: rsp_body,
                        })
                    }
                }
            })
        }
    }
}
/// Long-running operation: `POST .../virtualMachines/{vm}/powerOff`
/// (api-version 2021-03-01). Optional `skipShutdown` query parameter; empty body.
pub mod power_off {
    use super::models;
    /// Success statuses: 200 = done, 202 = accepted (completes asynchronously).
    #[derive(Debug)]
    pub enum Response {
        Ok200,
        Accepted202,
    }
    /// One variant per failure stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("Unexpected HTTP status code {}", status_code)]
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::StreamError),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Holds the client, path parameters, and the optional `skip_shutdown` flag.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_group_name: String,
        pub(crate) vm_name: String,
        pub(crate) subscription_id: String,
        pub(crate) skip_shutdown: Option<bool>,
    }
    impl Builder {
        /// Sets the optional `skipShutdown` query parameter.
        pub fn skip_shutdown(mut self, skip_shutdown: bool) -> Self {
            self.skip_shutdown = Some(skip_shutdown);
            self
        }
        /// Builds and sends the request; maps 200/202 to `Response`, anything
        /// else to `Error::UnexpectedResponse` with the raw response body.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/powerOff",
                    self.client.endpoint(),
                    &self.subscription_id,
                    &self.resource_group_name,
                    &self.vm_name
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::POST);
                // Bearer token scoped to the client's configured scopes.
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                // Optional flag: serialized as "true"/"false" only when set.
                if let Some(skip_shutdown) = &self.skip_shutdown {
                    url.query_pairs_mut().append_pair("skipShutdown", &skip_shutdown.to_string());
                }
                let req_body = azure_core::EMPTY_BODY;
                // Empty-body POST: Content-Length must be set explicitly.
                req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // rsp_headers is intentionally unused for this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => Ok(Response::Ok200),
                    http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        Err(Error::UnexpectedResponse {
                            status_code,
                            body: rsp_body,
                        })
                    }
                }
            })
        }
    }
}
/// Long-running operation: `POST .../virtualMachines/{vm}/reapply`
/// (api-version 2021-03-01). Unlike most sibling modules, non-success responses
/// are deserialized into `models::CloudError` (`Error::DefaultResponse`) rather
/// than returned as raw bytes.
pub mod reapply {
    use super::models;
    /// Success statuses: 200 = done, 202 = accepted (completes asynchronously).
    #[derive(Debug)]
    pub enum Response {
        Ok200,
        Accepted202,
    }
    /// One variant per failure stage; `DefaultResponse` carries the parsed
    /// service error body.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::StreamError),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Holds the client plus the path parameters needed to build the URL.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_group_name: String,
        pub(crate) vm_name: String,
        pub(crate) subscription_id: String,
    }
    impl Builder {
        /// Builds and sends the request; maps 200/202 to `Response` and any
        /// other status to `Error::DefaultResponse` after parsing `CloudError`.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/reapply",
                    self.client.endpoint(),
                    &self.subscription_id,
                    &self.resource_group_name,
                    &self.vm_name
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::POST);
                // Bearer token scoped to the client's configured scopes.
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                let req_body = azure_core::EMPTY_BODY;
                // Empty-body POST: Content-Length must be set explicitly.
                req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // rsp_headers is intentionally unused for this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => Ok(Response::Ok200),
                    http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        // Error bodies are expected to be CloudError JSON; a
                        // malformed body surfaces as Error::Deserialize instead.
                        let rsp_value: models::CloudError =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Err(Error::DefaultResponse {
                            status_code,
                            value: rsp_value,
                        })
                    }
                }
            })
        }
    }
}
/// Long-running operation: `POST .../virtualMachines/{vm}/restart`
/// (api-version 2021-03-01). Empty request body; no response body on success.
pub mod restart {
    use super::models;
    /// Success statuses: 200 = done, 202 = accepted (completes asynchronously).
    #[derive(Debug)]
    pub enum Response {
        Ok200,
        Accepted202,
    }
    /// One variant per failure stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("Unexpected HTTP status code {}", status_code)]
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::StreamError),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Holds the client plus the path parameters needed to build the URL.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_group_name: String,
        pub(crate) vm_name: String,
        pub(crate) subscription_id: String,
    }
    impl Builder {
        /// Builds and sends the request; maps 200/202 to `Response`, anything
        /// else to `Error::UnexpectedResponse` with the raw response body.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/restart",
                    self.client.endpoint(),
                    &self.subscription_id,
                    &self.resource_group_name,
                    &self.vm_name
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::POST);
                // Bearer token scoped to the client's configured scopes.
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                let req_body = azure_core::EMPTY_BODY;
                // Empty-body POST: Content-Length must be set explicitly.
                req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // rsp_headers is intentionally unused for this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => Ok(Response::Ok200),
                    http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        Err(Error::UnexpectedResponse {
                            status_code,
                            body: rsp_body,
                        })
                    }
                }
            })
        }
    }
}
/// Long-running operation: `POST .../virtualMachines/{vm}/start`
/// (api-version 2021-03-01). Empty request body; no response body on success.
pub mod start {
    use super::models;
    /// Success statuses: 200 = done, 202 = accepted (completes asynchronously).
    #[derive(Debug)]
    pub enum Response {
        Ok200,
        Accepted202,
    }
    /// One variant per failure stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("Unexpected HTTP status code {}", status_code)]
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::StreamError),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Holds the client plus the path parameters needed to build the URL.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_group_name: String,
        pub(crate) vm_name: String,
        pub(crate) subscription_id: String,
    }
    impl Builder {
        /// Builds and sends the request; maps 200/202 to `Response`, anything
        /// else to `Error::UnexpectedResponse` with the raw response body.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/start",
                    self.client.endpoint(),
                    &self.subscription_id,
                    &self.resource_group_name,
                    &self.vm_name
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::POST);
                // Bearer token scoped to the client's configured scopes.
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                let req_body = azure_core::EMPTY_BODY;
                // Empty-body POST: Content-Length must be set explicitly.
                req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // rsp_headers is intentionally unused for this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => Ok(Response::Ok200),
                    http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        Err(Error::UnexpectedResponse {
                            status_code,
                            body: rsp_body,
                        })
                    }
                }
            })
        }
    }
}
/// Long-running operation: `POST .../virtualMachines/{vm}/redeploy`
/// (api-version 2021-03-01). Empty request body; no response body on success.
pub mod redeploy {
    use super::models;
    /// Success statuses: 200 = done, 202 = accepted (completes asynchronously).
    #[derive(Debug)]
    pub enum Response {
        Ok200,
        Accepted202,
    }
    /// One variant per failure stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("Unexpected HTTP status code {}", status_code)]
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::StreamError),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Holds the client plus the path parameters needed to build the URL.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_group_name: String,
        pub(crate) vm_name: String,
        pub(crate) subscription_id: String,
    }
    impl Builder {
        /// Builds and sends the request; maps 200/202 to `Response`, anything
        /// else to `Error::UnexpectedResponse` with the raw response body.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/redeploy",
                    self.client.endpoint(),
                    &self.subscription_id,
                    &self.resource_group_name,
                    &self.vm_name
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::POST);
                // Bearer token scoped to the client's configured scopes.
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                let req_body = azure_core::EMPTY_BODY;
                // Empty-body POST: Content-Length must be set explicitly.
                req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // rsp_headers is intentionally unused for this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => Ok(Response::Ok200),
                    http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        Err(Error::UnexpectedResponse {
                            status_code,
                            body: rsp_body,
                        })
                    }
                }
            })
        }
    }
}
/// Long-running operation: `POST .../virtualMachines/{vm}/reimage`
/// (api-version 2021-03-01). The request body is the optional
/// `VirtualMachineReimageParameters` (JSON) — empty when not supplied.
pub mod reimage {
    use super::models;
    /// Success statuses: 200 = done, 202 = accepted (completes asynchronously).
    #[derive(Debug)]
    pub enum Response {
        Ok200,
        Accepted202,
    }
    /// One variant per failure stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("Unexpected HTTP status code {}", status_code)]
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::StreamError),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Holds the client, path parameters, and the optional reimage parameters.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_group_name: String,
        pub(crate) vm_name: String,
        pub(crate) subscription_id: String,
        pub(crate) parameters: Option<models::VirtualMachineReimageParameters>,
    }
    impl Builder {
        /// Sets the optional JSON request body.
        pub fn parameters(mut self, parameters: impl Into<models::VirtualMachineReimageParameters>) -> Self {
            self.parameters = Some(parameters.into());
            self
        }
        /// Builds and sends the request; maps 200/202 to `Response`, anything
        /// else to `Error::UnexpectedResponse` with the raw response body.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/reimage",
                    self.client.endpoint(),
                    &self.subscription_id,
                    &self.resource_group_name,
                    &self.vm_name
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::POST);
                // Bearer token scoped to the client's configured scopes.
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                // content-type is only set when a JSON body is actually sent.
                // NOTE(review): unlike the sibling empty-body POSTs, the None
                // branch sets no Content-Length: 0 header — confirm the
                // transport supplies it.
                let req_body = if let Some(parameters) = &self.parameters {
                    req_builder = req_builder.header("content-type", "application/json");
                    azure_core::to_json(parameters).map_err(Error::Serialize)?
                } else {
                    azure_core::EMPTY_BODY
                };
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // rsp_headers is intentionally unused for this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => Ok(Response::Ok200),
                    http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        Err(Error::UnexpectedResponse {
                            status_code,
                            body: rsp_body,
                        })
                    }
                }
            })
        }
    }
}
/// Operation: `POST .../virtualMachines/{vm}/retrieveBootDiagnosticsData`
/// (api-version 2021-03-01). Returns SAS URIs for boot-diagnostics blobs;
/// optional `sasUriExpirationTimeInMinutes` query parameter. Error responses
/// are parsed into `models::CloudError` (`Error::DefaultResponse`).
pub mod retrieve_boot_diagnostics_data {
    use super::models;
    /// One variant per failure stage; `DefaultResponse` carries the parsed
    /// service error body.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::StreamError),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Holds the client, path parameters, and the optional SAS expiry override.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_group_name: String,
        pub(crate) vm_name: String,
        pub(crate) subscription_id: String,
        pub(crate) sas_uri_expiration_time_in_minutes: Option<i32>,
    }
    impl Builder {
        /// Sets the optional `sasUriExpirationTimeInMinutes` query parameter.
        pub fn sas_uri_expiration_time_in_minutes(mut self, sas_uri_expiration_time_in_minutes: i32) -> Self {
            self.sas_uri_expiration_time_in_minutes = Some(sas_uri_expiration_time_in_minutes);
            self
        }
        /// Builds and sends the request; on 200 deserializes the JSON body
        /// into `models::RetrieveBootDiagnosticsDataResult`, otherwise parses
        /// a `CloudError` and returns `Error::DefaultResponse`.
        pub fn into_future(
            self,
        ) -> futures::future::BoxFuture<'static, std::result::Result<models::RetrieveBootDiagnosticsDataResult, Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/retrieveBootDiagnosticsData",
                    self.client.endpoint(),
                    &self.subscription_id,
                    &self.resource_group_name,
                    &self.vm_name
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::POST);
                // Bearer token scoped to the client's configured scopes.
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                // Optional expiry override: only appended when the caller set it.
                if let Some(sas_uri_expiration_time_in_minutes) = &self.sas_uri_expiration_time_in_minutes {
                    url.query_pairs_mut()
                        .append_pair("sasUriExpirationTimeInMinutes", &sas_uri_expiration_time_in_minutes.to_string());
                }
                let req_body = azure_core::EMPTY_BODY;
                // Empty-body POST: Content-Length must be set explicitly.
                req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // rsp_headers is intentionally unused for this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::RetrieveBootDiagnosticsDataResult =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Ok(rsp_value)
                    }
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        // Error bodies are expected to be CloudError JSON; a
                        // malformed body surfaces as Error::Deserialize instead.
                        let rsp_value: models::CloudError =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Err(Error::DefaultResponse {
                            status_code,
                            value: rsp_value,
                        })
                    }
                }
            })
        }
    }
}
pub mod perform_maintenance {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/performMaintenance",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod simulate_eviction {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/simulateEviction",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::NO_CONTENT => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod assess_patches {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::VirtualMachineAssessPatchesResult),
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/assessPatches",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineAssessPatchesResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod install_patches {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::VirtualMachineInstallPatchesResult),
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_name: String,
pub(crate) install_patches_input: models::VirtualMachineInstallPatchesParameters,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/installPatches",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.install_patches_input).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineInstallPatchesResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod run_command {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::RunCommandResult),
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_name: String,
pub(crate) parameters: models::RunCommandInput,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/runCommand",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RunCommandResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod virtual_machine_scale_sets {
use super::models;
/// Operation-group client for virtual machine scale set operations; wraps the
/// shared service [`super::Client`]. Each method clones the inner client into a
/// per-operation request builder (defined in the sibling submodules).
pub struct Client(pub(crate) super::Client);
impl Client {
    /// Returns a builder that lists VM scale sets under a subscription for a location.
    pub fn list_by_location(&self, location: impl Into<String>, subscription_id: impl Into<String>) -> list_by_location::Builder {
        list_by_location::Builder {
            client: self.0.clone(),
            location: location.into(),
            subscription_id: subscription_id.into(),
        }
    }
    /// Returns a builder that fetches a single VM scale set.
    /// The optional `$expand` query parameter defaults to unset.
    pub fn get(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> get::Builder {
        get::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            subscription_id: subscription_id.into(),
            expand: None,
        }
    }
    /// Returns a builder that creates or updates a VM scale set from `parameters`.
    pub fn create_or_update(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        parameters: impl Into<models::VirtualMachineScaleSet>,
        subscription_id: impl Into<String>,
    ) -> create_or_update::Builder {
        create_or_update::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            parameters: parameters.into(),
            subscription_id: subscription_id.into(),
        }
    }
    /// Returns a builder that applies a partial update (`VirtualMachineScaleSetUpdate`).
    pub fn update(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        parameters: impl Into<models::VirtualMachineScaleSetUpdate>,
        subscription_id: impl Into<String>,
    ) -> update::Builder {
        update::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            parameters: parameters.into(),
            subscription_id: subscription_id.into(),
        }
    }
    /// Returns a builder that deletes a VM scale set.
    /// The optional `force_deletion` flag defaults to unset.
    pub fn delete(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> delete::Builder {
        delete::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            subscription_id: subscription_id.into(),
            force_deletion: None,
        }
    }
    /// Returns a builder that deallocates VMs in the scale set.
    /// Instance IDs default to unset (all instances, per service semantics — TODO confirm).
    pub fn deallocate(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> deallocate::Builder {
        deallocate::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            subscription_id: subscription_id.into(),
            vm_instance_i_ds: None,
        }
    }
    /// Returns a builder that deletes the given instances; instance IDs are required here.
    pub fn delete_instances(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        vm_instance_i_ds: impl Into<models::VirtualMachineScaleSetVmInstanceRequiredIDs>,
        subscription_id: impl Into<String>,
    ) -> delete_instances::Builder {
        delete_instances::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            vm_instance_i_ds: vm_instance_i_ds.into(),
            subscription_id: subscription_id.into(),
            force_deletion: None,
        }
    }
    /// Returns a builder that fetches the scale set's instance view.
    pub fn get_instance_view(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> get_instance_view::Builder {
        get_instance_view::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            subscription_id: subscription_id.into(),
        }
    }
    /// Returns a builder that lists scale sets in a resource group.
    pub fn list(&self, resource_group_name: impl Into<String>, subscription_id: impl Into<String>) -> list::Builder {
        list::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            subscription_id: subscription_id.into(),
        }
    }
    /// Returns a builder that lists all scale sets in the subscription.
    pub fn list_all(&self, subscription_id: impl Into<String>) -> list_all::Builder {
        list_all::Builder {
            client: self.0.clone(),
            subscription_id: subscription_id.into(),
        }
    }
    /// Returns a builder that lists available SKUs for the scale set.
    pub fn list_skus(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> list_skus::Builder {
        list_skus::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            subscription_id: subscription_id.into(),
        }
    }
    /// Returns a builder that fetches the scale set's OS upgrade history.
    pub fn get_os_upgrade_history(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> get_os_upgrade_history::Builder {
        get_os_upgrade_history::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            subscription_id: subscription_id.into(),
        }
    }
    /// Returns a builder that powers off VMs in the scale set.
    /// `vm_instance_i_ds` and `skip_shutdown` both default to unset.
    pub fn power_off(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> power_off::Builder {
        power_off::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            subscription_id: subscription_id.into(),
            vm_instance_i_ds: None,
            skip_shutdown: None,
        }
    }
    /// Returns a builder that restarts VMs in the scale set.
    pub fn restart(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> restart::Builder {
        restart::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            subscription_id: subscription_id.into(),
            vm_instance_i_ds: None,
        }
    }
    /// Returns a builder that starts VMs in the scale set.
    pub fn start(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> start::Builder {
        start::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            subscription_id: subscription_id.into(),
            vm_instance_i_ds: None,
        }
    }
    /// Returns a builder that redeploys VMs in the scale set.
    pub fn redeploy(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> redeploy::Builder {
        redeploy::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            subscription_id: subscription_id.into(),
            vm_instance_i_ds: None,
        }
    }
    /// Returns a builder that performs maintenance on VMs in the scale set.
    pub fn perform_maintenance(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> perform_maintenance::Builder {
        perform_maintenance::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            subscription_id: subscription_id.into(),
            vm_instance_i_ds: None,
        }
    }
    /// Returns a builder that upgrades the given instances to the latest model;
    /// instance IDs are required here.
    pub fn update_instances(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        vm_instance_i_ds: impl Into<models::VirtualMachineScaleSetVmInstanceRequiredIDs>,
        subscription_id: impl Into<String>,
    ) -> update_instances::Builder {
        update_instances::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            vm_instance_i_ds: vm_instance_i_ds.into(),
            subscription_id: subscription_id.into(),
        }
    }
    /// Returns a builder that reimages VMs; the optional reimage input defaults to unset.
    pub fn reimage(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> reimage::Builder {
        reimage::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            subscription_id: subscription_id.into(),
            vm_scale_set_reimage_input: None,
        }
    }
    /// Returns a builder that reimages all disks of VMs in the scale set.
    pub fn reimage_all(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> reimage_all::Builder {
        reimage_all::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            subscription_id: subscription_id.into(),
            vm_instance_i_ds: None,
        }
    }
    /// Returns a builder for the Service Fabric platform-update-domain walk;
    /// note `platform_update_domain` is a required value, not an `Into` conversion.
    pub fn force_recovery_service_fabric_platform_update_domain_walk(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        subscription_id: impl Into<String>,
        platform_update_domain: i64,
    ) -> force_recovery_service_fabric_platform_update_domain_walk::Builder {
        force_recovery_service_fabric_platform_update_domain_walk::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            subscription_id: subscription_id.into(),
            platform_update_domain,
        }
    }
    /// Returns a builder that converts the scale set to a single placement group.
    pub fn convert_to_single_placement_group(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        parameters: impl Into<models::VmScaleSetConvertToSinglePlacementGroupInput>,
        subscription_id: impl Into<String>,
    ) -> convert_to_single_placement_group::Builder {
        convert_to_single_placement_group::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            parameters: parameters.into(),
            subscription_id: subscription_id.into(),
        }
    }
    /// Returns a builder that changes the state of an orchestration service.
    pub fn set_orchestration_service_state(
        &self,
        resource_group_name: impl Into<String>,
        vm_scale_set_name: impl Into<String>,
        parameters: impl Into<models::OrchestrationServiceStateInput>,
        subscription_id: impl Into<String>,
    ) -> set_orchestration_service_state::Builder {
        set_orchestration_service_state::Builder {
            client: self.0.clone(),
            resource_group_name: resource_group_name.into(),
            vm_scale_set_name: vm_scale_set_name.into(),
            parameters: parameters.into(),
            subscription_id: subscription_id.into(),
        }
    }
}
pub mod list_by_location {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineScaleSetListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/virtualMachineScaleSets",
self.client.endpoint(),
&self.subscription_id,
&self.location
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSetListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) subscription_id: String,
pub(crate) expand: Option<String>,
}
impl Builder {
pub fn expand(mut self, expand: impl Into<String>) -> Self {
self.expand = Some(expand.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineScaleSet, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(expand) = &self.expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSet =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::VirtualMachineScaleSet),
Created201(models::VirtualMachineScaleSet),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) parameters: models::VirtualMachineScaleSet,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSet =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSet =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod update {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) parameters: models::VirtualMachineScaleSetUpdate,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineScaleSet, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSet =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) subscription_id: String,
pub(crate) force_deletion: Option<bool>,
}
impl Builder {
pub fn force_deletion(mut self, force_deletion: bool) -> Self {
self.force_deletion = Some(force_deletion);
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(force_deletion) = &self.force_deletion {
url.query_pairs_mut().append_pair("forceDeletion", &force_deletion.to_string());
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod deallocate {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) subscription_id: String,
pub(crate) vm_instance_i_ds: Option<models::VirtualMachineScaleSetVmInstanceIDs>,
}
impl Builder {
pub fn vm_instance_i_ds(mut self, vm_instance_i_ds: impl Into<models::VirtualMachineScaleSetVmInstanceIDs>) -> Self {
self.vm_instance_i_ds = Some(vm_instance_i_ds.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/deallocate",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = if let Some(vm_instance_i_ds) = &self.vm_instance_i_ds {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(vm_instance_i_ds).map_err(Error::Serialize)?
} else {
azure_core::EMPTY_BODY
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod delete_instances {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) vm_instance_i_ds: models::VirtualMachineScaleSetVmInstanceRequiredIDs,
pub(crate) subscription_id: String,
pub(crate) force_deletion: Option<bool>,
}
impl Builder {
pub fn force_deletion(mut self, force_deletion: bool) -> Self {
self.force_deletion = Some(force_deletion);
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/delete",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.vm_instance_i_ds).map_err(Error::Serialize)?;
if let Some(force_deletion) = &self.force_deletion {
url.query_pairs_mut().append_pair("forceDeletion", &force_deletion.to_string());
}
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod get_instance_view {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineScaleSetInstanceView, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/instanceView",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSetInstanceView =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineScaleSetListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSetListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list_all {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineScaleSetListWithLinkResult, Error>>
{
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/virtualMachineScaleSets",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSetListWithLinkResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list_skus {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineScaleSetListSkusResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/skus",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSetListSkusResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod get_os_upgrade_history {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineScaleSetListOsUpgradeHistory, Error>>
{
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/osUpgradeHistory",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSetListOsUpgradeHistory =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod power_off {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) subscription_id: String,
pub(crate) vm_instance_i_ds: Option<models::VirtualMachineScaleSetVmInstanceIDs>,
pub(crate) skip_shutdown: Option<bool>,
}
impl Builder {
pub fn vm_instance_i_ds(mut self, vm_instance_i_ds: impl Into<models::VirtualMachineScaleSetVmInstanceIDs>) -> Self {
self.vm_instance_i_ds = Some(vm_instance_i_ds.into());
self
}
pub fn skip_shutdown(mut self, skip_shutdown: bool) -> Self {
self.skip_shutdown = Some(skip_shutdown);
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/poweroff",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = if let Some(vm_instance_i_ds) = &self.vm_instance_i_ds {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(vm_instance_i_ds).map_err(Error::Serialize)?
} else {
azure_core::EMPTY_BODY
};
if let Some(skip_shutdown) = &self.skip_shutdown {
url.query_pairs_mut().append_pair("skipShutdown", &skip_shutdown.to_string());
}
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod restart {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) subscription_id: String,
pub(crate) vm_instance_i_ds: Option<models::VirtualMachineScaleSetVmInstanceIDs>,
}
impl Builder {
pub fn vm_instance_i_ds(mut self, vm_instance_i_ds: impl Into<models::VirtualMachineScaleSetVmInstanceIDs>) -> Self {
self.vm_instance_i_ds = Some(vm_instance_i_ds.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/restart",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = if let Some(vm_instance_i_ds) = &self.vm_instance_i_ds {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(vm_instance_i_ds).map_err(Error::Serialize)?
} else {
azure_core::EMPTY_BODY
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod start {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) subscription_id: String,
pub(crate) vm_instance_i_ds: Option<models::VirtualMachineScaleSetVmInstanceIDs>,
}
impl Builder {
pub fn vm_instance_i_ds(mut self, vm_instance_i_ds: impl Into<models::VirtualMachineScaleSetVmInstanceIDs>) -> Self {
self.vm_instance_i_ds = Some(vm_instance_i_ds.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/start",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = if let Some(vm_instance_i_ds) = &self.vm_instance_i_ds {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(vm_instance_i_ds).map_err(Error::Serialize)?
} else {
azure_core::EMPTY_BODY
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod redeploy {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) subscription_id: String,
pub(crate) vm_instance_i_ds: Option<models::VirtualMachineScaleSetVmInstanceIDs>,
}
impl Builder {
pub fn vm_instance_i_ds(mut self, vm_instance_i_ds: impl Into<models::VirtualMachineScaleSetVmInstanceIDs>) -> Self {
self.vm_instance_i_ds = Some(vm_instance_i_ds.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/redeploy",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = if let Some(vm_instance_i_ds) = &self.vm_instance_i_ds {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(vm_instance_i_ds).map_err(Error::Serialize)?
} else {
azure_core::EMPTY_BODY
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod perform_maintenance {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) subscription_id: String,
pub(crate) vm_instance_i_ds: Option<models::VirtualMachineScaleSetVmInstanceIDs>,
}
impl Builder {
pub fn vm_instance_i_ds(mut self, vm_instance_i_ds: impl Into<models::VirtualMachineScaleSetVmInstanceIDs>) -> Self {
self.vm_instance_i_ds = Some(vm_instance_i_ds.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/performMaintenance",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = if let Some(vm_instance_i_ds) = &self.vm_instance_i_ds {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(vm_instance_i_ds).map_err(Error::Serialize)?
} else {
azure_core::EMPTY_BODY
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod update_instances {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) vm_instance_i_ds: models::VirtualMachineScaleSetVmInstanceRequiredIDs,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/manualupgrade",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.vm_instance_i_ds).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod reimage {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) subscription_id: String,
pub(crate) vm_scale_set_reimage_input: Option<models::VirtualMachineScaleSetReimageParameters>,
}
impl Builder {
pub fn vm_scale_set_reimage_input(
mut self,
vm_scale_set_reimage_input: impl Into<models::VirtualMachineScaleSetReimageParameters>,
) -> Self {
self.vm_scale_set_reimage_input = Some(vm_scale_set_reimage_input.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/reimage",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = if let Some(vm_scale_set_reimage_input) = &self.vm_scale_set_reimage_input {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(vm_scale_set_reimage_input).map_err(Error::Serialize)?
} else {
azure_core::EMPTY_BODY
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod reimage_all {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) subscription_id: String,
pub(crate) vm_instance_i_ds: Option<models::VirtualMachineScaleSetVmInstanceIDs>,
}
impl Builder {
pub fn vm_instance_i_ds(mut self, vm_instance_i_ds: impl Into<models::VirtualMachineScaleSetVmInstanceIDs>) -> Self {
self.vm_instance_i_ds = Some(vm_instance_i_ds.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/reimageall",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = if let Some(vm_instance_i_ds) = &self.vm_instance_i_ds {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(vm_instance_i_ds).map_err(Error::Serialize)?
} else {
azure_core::EMPTY_BODY
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod force_recovery_service_fabric_platform_update_domain_walk {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) subscription_id: String,
pub(crate) platform_update_domain: i64,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::RecoveryWalkResponse, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/forceRecoveryServiceFabricPlatformUpdateDomainWalk" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let platform_update_domain = &self.platform_update_domain;
url.query_pairs_mut()
.append_pair("platformUpdateDomain", &platform_update_domain.to_string());
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RecoveryWalkResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod convert_to_single_placement_group {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) parameters: models::VmScaleSetConvertToSinglePlacementGroupInput,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/convertToSinglePlacementGroup" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod set_orchestration_service_state {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) parameters: models::OrchestrationServiceStateInput,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/setOrchestrationServiceState" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod virtual_machine_sizes {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(&self, location: impl Into<String>, subscription_id: impl Into<String>) -> list::Builder {
list::Builder {
client: self.0.clone(),
location: location.into(),
subscription_id: subscription_id.into(),
}
}
}
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineSizeListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/vmSizes",
self.client.endpoint(),
&self.subscription_id,
&self.location
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineSizeListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod images {
use super::models;
/// Entry points for image operations; each method returns the matching builder.
pub struct Client(pub(crate) super::Client);
impl Client {
    /// Builder for retrieving a single image resource.
    pub fn get(
        &self,
        resource_group_name: impl Into<String>,
        image_name: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> get::Builder {
        let client = self.0.clone();
        get::Builder {
            client,
            resource_group_name: resource_group_name.into(),
            image_name: image_name.into(),
            subscription_id: subscription_id.into(),
            // `expand` starts unset; the builder exposes a setter for it.
            expand: None,
        }
    }
    /// Builder for creating or replacing an image resource.
    pub fn create_or_update(
        &self,
        resource_group_name: impl Into<String>,
        image_name: impl Into<String>,
        parameters: impl Into<models::Image>,
        subscription_id: impl Into<String>,
    ) -> create_or_update::Builder {
        let client = self.0.clone();
        create_or_update::Builder {
            client,
            resource_group_name: resource_group_name.into(),
            image_name: image_name.into(),
            parameters: parameters.into(),
            subscription_id: subscription_id.into(),
        }
    }
    /// Builder for applying a partial update to an image resource.
    pub fn update(
        &self,
        resource_group_name: impl Into<String>,
        image_name: impl Into<String>,
        parameters: impl Into<models::ImageUpdate>,
        subscription_id: impl Into<String>,
    ) -> update::Builder {
        let client = self.0.clone();
        update::Builder {
            client,
            resource_group_name: resource_group_name.into(),
            image_name: image_name.into(),
            parameters: parameters.into(),
            subscription_id: subscription_id.into(),
        }
    }
    /// Builder for deleting an image resource.
    pub fn delete(
        &self,
        resource_group_name: impl Into<String>,
        image_name: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> delete::Builder {
        let client = self.0.clone();
        delete::Builder {
            client,
            resource_group_name: resource_group_name.into(),
            image_name: image_name.into(),
            subscription_id: subscription_id.into(),
        }
    }
    /// Builder for listing images within one resource group.
    pub fn list_by_resource_group(
        &self,
        resource_group_name: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> list_by_resource_group::Builder {
        let client = self.0.clone();
        list_by_resource_group::Builder {
            client,
            resource_group_name: resource_group_name.into(),
            subscription_id: subscription_id.into(),
        }
    }
    /// Builder for listing images across the subscription.
    pub fn list(&self, subscription_id: impl Into<String>) -> list::Builder {
        let client = self.0.clone();
        list::Builder {
            client,
            subscription_id: subscription_id.into(),
        }
    }
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) image_name: String,
pub(crate) subscription_id: String,
pub(crate) expand: Option<String>,
}
impl Builder {
pub fn expand(mut self, expand: impl Into<String>) -> Self {
self.expand = Some(expand.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Image, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/images/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.image_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(expand) = &self.expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Image =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::Image),
Created201(models::Image),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) image_name: String,
pub(crate) parameters: models::Image,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/images/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.image_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Image =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Image =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::Image),
Created201(models::Image),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) image_name: String,
pub(crate) parameters: models::ImageUpdate,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/images/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.image_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Image =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Image =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) image_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/images/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.image_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list_by_resource_group {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ImageListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/images",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ImageListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ImageListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/images",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ImageListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod restore_point_collections {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
    /// Builds a request to retrieve a restore point collection (GET).
    /// The optional `$expand` query parameter can be set on the returned builder.
    pub fn get(
        &self,
        subscription_id: impl Into<String>,
        resource_group_name: impl Into<String>,
        restore_point_collection_name: impl Into<String>,
    ) -> get::Builder {
        get::Builder {
            client: self.0.clone(),
            subscription_id: subscription_id.into(),
            resource_group_name: resource_group_name.into(),
            restore_point_collection_name: restore_point_collection_name.into(),
            expand: None,
        }
    }
    /// Builds a request to create or replace a restore point collection (PUT)
    /// with the given `models::RestorePointCollection` payload.
    pub fn create_or_update(
        &self,
        subscription_id: impl Into<String>,
        resource_group_name: impl Into<String>,
        restore_point_collection_name: impl Into<String>,
        parameters: impl Into<models::RestorePointCollection>,
    ) -> create_or_update::Builder {
        create_or_update::Builder {
            client: self.0.clone(),
            subscription_id: subscription_id.into(),
            resource_group_name: resource_group_name.into(),
            restore_point_collection_name: restore_point_collection_name.into(),
            parameters: parameters.into(),
        }
    }
    /// Builds a request to partially update a restore point collection (PATCH)
    /// with a `models::RestorePointCollectionUpdate` payload.
    pub fn update(
        &self,
        subscription_id: impl Into<String>,
        resource_group_name: impl Into<String>,
        restore_point_collection_name: impl Into<String>,
        parameters: impl Into<models::RestorePointCollectionUpdate>,
    ) -> update::Builder {
        update::Builder {
            client: self.0.clone(),
            subscription_id: subscription_id.into(),
            resource_group_name: resource_group_name.into(),
            restore_point_collection_name: restore_point_collection_name.into(),
            parameters: parameters.into(),
        }
    }
    /// Builds a request to delete a restore point collection (DELETE).
    pub fn delete(
        &self,
        subscription_id: impl Into<String>,
        resource_group_name: impl Into<String>,
        restore_point_collection_name: impl Into<String>,
    ) -> delete::Builder {
        delete::Builder {
            client: self.0.clone(),
            subscription_id: subscription_id.into(),
            resource_group_name: resource_group_name.into(),
            restore_point_collection_name: restore_point_collection_name.into(),
        }
    }
    /// Builds a request to list the restore point collections in a resource group.
    pub fn list(&self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>) -> list::Builder {
        list::Builder {
            client: self.0.clone(),
            subscription_id: subscription_id.into(),
            resource_group_name: resource_group_name.into(),
        }
    }
    /// Builds a request to list all restore point collections in the subscription.
    pub fn list_all(&self, subscription_id: impl Into<String>) -> list_all::Builder {
        list_all::Builder {
            client: self.0.clone(),
            subscription_id: subscription_id.into(),
        }
    }
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) restore_point_collection_name: String,
pub(crate) expand: Option<String>,
}
impl Builder {
pub fn expand(mut self, expand: impl Into<String>) -> Self {
self.expand = Some(expand.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::RestorePointCollection, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/restorePointCollections/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.restore_point_collection_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(expand) = &self.expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RestorePointCollection =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::RestorePointCollection),
Created201(models::RestorePointCollection),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) restore_point_collection_name: String,
pub(crate) parameters: models::RestorePointCollection,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/restorePointCollections/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.restore_point_collection_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RestorePointCollection =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RestorePointCollection =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod update {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) restore_point_collection_name: String,
pub(crate) parameters: models::RestorePointCollectionUpdate,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::RestorePointCollection, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/restorePointCollections/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.restore_point_collection_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RestorePointCollection =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) restore_point_collection_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/restorePointCollections/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.restore_point_collection_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::RestorePointCollectionListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/restorePointCollections",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RestorePointCollectionListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_all {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::RestorePointCollectionListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/restorePointCollections",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RestorePointCollectionListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod restore_points {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
restore_point_collection_name: impl Into<String>,
restore_point_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
restore_point_collection_name: restore_point_collection_name.into(),
restore_point_name: restore_point_name.into(),
}
}
pub fn create(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
restore_point_collection_name: impl Into<String>,
restore_point_name: impl Into<String>,
parameters: impl Into<models::RestorePoint>,
) -> create::Builder {
create::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
restore_point_collection_name: restore_point_collection_name.into(),
restore_point_name: restore_point_name.into(),
parameters: parameters.into(),
}
}
pub fn delete(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
restore_point_collection_name: impl Into<String>,
restore_point_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
restore_point_collection_name: restore_point_collection_name.into(),
restore_point_name: restore_point_name.into(),
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) restore_point_collection_name: String,
pub(crate) restore_point_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::RestorePoint, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/restorePointCollections/{}/restorePoints/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.restore_point_collection_name,
&self.restore_point_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RestorePoint =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) restore_point_collection_name: String,
pub(crate) restore_point_name: String,
pub(crate) parameters: models::RestorePoint,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::RestorePoint, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/restorePointCollections/{}/restorePoints/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.restore_point_collection_name,
&self.restore_point_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RestorePoint =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) restore_point_collection_name: String,
pub(crate) restore_point_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/restorePointCollections/{}/restorePoints/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.restore_point_collection_name,
&self.restore_point_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod virtual_machine_scale_set_extensions {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
resource_group_name: impl Into<String>,
vm_scale_set_name: impl Into<String>,
vmss_extension_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
vm_scale_set_name: vm_scale_set_name.into(),
vmss_extension_name: vmss_extension_name.into(),
subscription_id: subscription_id.into(),
expand: None,
}
}
pub fn create_or_update(
&self,
resource_group_name: impl Into<String>,
vm_scale_set_name: impl Into<String>,
vmss_extension_name: impl Into<String>,
extension_parameters: impl Into<models::VirtualMachineScaleSetExtension>,
subscription_id: impl Into<String>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
vm_scale_set_name: vm_scale_set_name.into(),
vmss_extension_name: vmss_extension_name.into(),
extension_parameters: extension_parameters.into(),
subscription_id: subscription_id.into(),
}
}
pub fn update(
&self,
resource_group_name: impl Into<String>,
vm_scale_set_name: impl Into<String>,
vmss_extension_name: impl Into<String>,
extension_parameters: impl Into<models::VirtualMachineScaleSetExtensionUpdate>,
subscription_id: impl Into<String>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
vm_scale_set_name: vm_scale_set_name.into(),
vmss_extension_name: vmss_extension_name.into(),
extension_parameters: extension_parameters.into(),
subscription_id: subscription_id.into(),
}
}
pub fn delete(
&self,
resource_group_name: impl Into<String>,
vm_scale_set_name: impl Into<String>,
vmss_extension_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
vm_scale_set_name: vm_scale_set_name.into(),
vmss_extension_name: vmss_extension_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn list(
&self,
resource_group_name: impl Into<String>,
vm_scale_set_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
vm_scale_set_name: vm_scale_set_name.into(),
subscription_id: subscription_id.into(),
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) vmss_extension_name: String,
pub(crate) subscription_id: String,
pub(crate) expand: Option<String>,
}
impl Builder {
pub fn expand(mut self, expand: impl Into<String>) -> Self {
self.expand = Some(expand.into());
self
}
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineScaleSetExtension, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/extensions/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name,
&self.vmss_extension_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(expand) = &self.expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSetExtension =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::VirtualMachineScaleSetExtension),
Created201(models::VirtualMachineScaleSetExtension),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) vmss_extension_name: String,
pub(crate) extension_parameters: models::VirtualMachineScaleSetExtension,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/extensions/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name,
&self.vmss_extension_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.extension_parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSetExtension =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSetExtension =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::VirtualMachineScaleSetExtension),
Created201(models::VirtualMachineScaleSetExtension),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) vmss_extension_name: String,
pub(crate) extension_parameters: models::VirtualMachineScaleSetExtensionUpdate,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/extensions/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name,
&self.vmss_extension_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.extension_parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSetExtension =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSetExtension =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) vmss_extension_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/extensions/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name,
&self.vmss_extension_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineScaleSetExtensionListResult, Error>>
{
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/extensions",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSetExtensionListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod virtual_machine_scale_set_rolling_upgrades {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn cancel(
&self,
resource_group_name: impl Into<String>,
vm_scale_set_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> cancel::Builder {
cancel::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
vm_scale_set_name: vm_scale_set_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn start_os_upgrade(
&self,
resource_group_name: impl Into<String>,
vm_scale_set_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> start_os_upgrade::Builder {
start_os_upgrade::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
vm_scale_set_name: vm_scale_set_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn start_extension_upgrade(
&self,
resource_group_name: impl Into<String>,
vm_scale_set_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> start_extension_upgrade::Builder {
start_extension_upgrade::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
vm_scale_set_name: vm_scale_set_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn get_latest(
&self,
resource_group_name: impl Into<String>,
vm_scale_set_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> get_latest::Builder {
get_latest::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
vm_scale_set_name: vm_scale_set_name.into(),
subscription_id: subscription_id.into(),
}
}
}
pub mod cancel {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/rollingUpgrades/cancel" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod start_os_upgrade {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/osRollingUpgrade",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod start_extension_upgrade {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/extensionRollingUpgrade" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod get_latest {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::RollingUpgradeStatusInfo, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/rollingUpgrades/latest" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RollingUpgradeStatusInfo =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod virtual_machine_scale_set_vm_extensions {
use super::models;
    /// Client for extension operations on individual VM scale set instances.
    pub struct Client(pub(crate) super::Client);
    impl Client {
        /// Returns a builder for fetching a single VM instance extension.
        /// The optional `$expand` query can be set on the returned builder.
        pub fn get(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            vm_extension_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> get::Builder {
            get::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                vm_extension_name: vm_extension_name.into(),
                subscription_id: subscription_id.into(),
                expand: None,
            }
        }
        /// Returns a builder for creating or replacing a VM instance extension
        /// with the given parameters.
        pub fn create_or_update(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            vm_extension_name: impl Into<String>,
            extension_parameters: impl Into<models::VirtualMachineScaleSetVmExtension>,
            subscription_id: impl Into<String>,
        ) -> create_or_update::Builder {
            create_or_update::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                vm_extension_name: vm_extension_name.into(),
                extension_parameters: extension_parameters.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Returns a builder for patching an existing VM instance extension.
        pub fn update(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            vm_extension_name: impl Into<String>,
            extension_parameters: impl Into<models::VirtualMachineScaleSetVmExtensionUpdate>,
            subscription_id: impl Into<String>,
        ) -> update::Builder {
            update::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                vm_extension_name: vm_extension_name.into(),
                extension_parameters: extension_parameters.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Returns a builder for deleting a VM instance extension.
        pub fn delete(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            vm_extension_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> delete::Builder {
            delete::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                vm_extension_name: vm_extension_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Returns a builder for listing all extensions on a VM instance.
        /// The optional `$expand` query can be set on the returned builder.
        pub fn list(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> list::Builder {
            list::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                subscription_id: subscription_id.into(),
                expand: None,
            }
        }
    }
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) instance_id: String,
pub(crate) vm_extension_name: String,
pub(crate) subscription_id: String,
pub(crate) expand: Option<String>,
}
impl Builder {
pub fn expand(mut self, expand: impl Into<String>) -> Self {
self.expand = Some(expand.into());
self
}
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineScaleSetVmExtension, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualMachines/{}/extensions/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id , & self . vm_extension_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(expand) = &self.expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSetVmExtension =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::VirtualMachineScaleSetVmExtension),
Created201(models::VirtualMachineScaleSetVmExtension),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) instance_id: String,
pub(crate) vm_extension_name: String,
pub(crate) extension_parameters: models::VirtualMachineScaleSetVmExtension,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualMachines/{}/extensions/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id , & self . vm_extension_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.extension_parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSetVmExtension =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSetVmExtension =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod update {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) instance_id: String,
pub(crate) vm_extension_name: String,
pub(crate) extension_parameters: models::VirtualMachineScaleSetVmExtensionUpdate,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineScaleSetVmExtension, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualMachines/{}/extensions/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id , & self . vm_extension_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.extension_parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSetVmExtension =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) instance_id: String,
pub(crate) vm_extension_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualMachines/{}/extensions/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id , & self . vm_extension_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
    pub mod list {
        use super::models;
        /// Errors that can occur while listing extensions of a VM scale set VM.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service returned a non-200 status with a `CloudError` payload.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::StreamError),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Request builder for `GET .../virtualMachines/{instanceId}/extensions`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_group_name: String,
            pub(crate) vm_scale_set_name: String,
            pub(crate) instance_id: String,
            pub(crate) subscription_id: String,
            // Optional `$expand` query parameter; omitted from the URL when `None`.
            pub(crate) expand: Option<String>,
        }
        impl Builder {
            /// Sets the `$expand` OData query parameter for the list operation.
            pub fn expand(mut self, expand: impl Into<String>) -> Self {
                self.expand = Some(expand.into());
                self
            }
            /// Builds and sends the GET request, deserializing the extension
            /// list on HTTP 200.
            ///
            /// # Errors
            /// Returns [`Error`] for URL parsing, request building, token
            /// acquisition, transport failures, or any non-200 status
            /// (surfaced as [`Error::DefaultResponse`] with the service's
            /// `CloudError` body).
            pub fn into_future(
                self,
            ) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineScaleSetVmExtensionsListResult, Error>>
            {
                Box::pin(async move {
                    // Build the request URL from the client endpoint and path parameters.
                    // (Spacing below is as emitted by the code generator.)
                    let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualMachines/{}/extensions" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id) ;
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::GET);
                    // Acquire a bearer token scoped to the client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                    if let Some(expand) = &self.expand {
                        url.query_pairs_mut().append_pair("$expand", expand);
                    }
                    // GET carries no body.
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    // `rsp_headers` is unused; it is part of the deconstructed tuple.
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::VirtualMachineScaleSetVmExtensionsListResult =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        status_code => {
                            // Any other status: parse the service's CloudError body.
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudError =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Err(Error::DefaultResponse {
                                status_code,
                                value: rsp_value,
                            })
                        }
                    }
                })
            }
        }
    }
}
pub mod virtual_machine_scale_set_v_ms {
use super::models;
    /// Operation-group client for VM scale set VM operations; thin wrapper
    /// around the service-level `Client`.
    pub struct Client(pub(crate) super::Client);
    impl Client {
        /// Creates a request builder for the `reimage` operation on a single
        /// scale set VM. Optional reimage parameters can be supplied on the
        /// returned builder before sending.
        pub fn reimage(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> reimage::Builder {
            reimage::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                subscription_id: subscription_id.into(),
                vm_scale_set_vm_reimage_input: None,
            }
        }
        /// Creates a request builder for the `reimageall` operation on a
        /// single scale set VM.
        pub fn reimage_all(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> reimage_all::Builder {
            reimage_all::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Creates a request builder for the `deallocate` operation on a
        /// single scale set VM.
        pub fn deallocate(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> deallocate::Builder {
            deallocate::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Creates a request builder that fetches a single scale set VM.
        /// `$expand` can be set on the returned builder.
        pub fn get(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> get::Builder {
            get::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                subscription_id: subscription_id.into(),
                expand: None,
            }
        }
        /// Creates a request builder that updates (PUT) a single scale set
        /// VM with the given `parameters`.
        pub fn update(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            parameters: impl Into<models::VirtualMachineScaleSetVm>,
            subscription_id: impl Into<String>,
        ) -> update::Builder {
            update::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                parameters: parameters.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Creates a request builder that deletes a single scale set VM.
        /// `force_deletion` can be set on the returned builder.
        pub fn delete(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> delete::Builder {
            delete::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                subscription_id: subscription_id.into(),
                force_deletion: None,
            }
        }
        /// Creates a request builder that fetches the instance view of a
        /// single scale set VM.
        pub fn get_instance_view(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> get_instance_view::Builder {
            get_instance_view::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Creates a request builder that lists the VMs of a scale set.
        /// `$filter`, `$select`, and `$expand` can be set on the returned
        /// builder.
        pub fn list(
            &self,
            resource_group_name: impl Into<String>,
            virtual_machine_scale_set_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> list::Builder {
            list::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                virtual_machine_scale_set_name: virtual_machine_scale_set_name.into(),
                subscription_id: subscription_id.into(),
                filter: None,
                select: None,
                expand: None,
            }
        }
        /// Creates a request builder for the `poweroff` operation on a
        /// single scale set VM. `skip_shutdown` can be set on the returned
        /// builder.
        pub fn power_off(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> power_off::Builder {
            power_off::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                subscription_id: subscription_id.into(),
                skip_shutdown: None,
            }
        }
        /// Creates a request builder for the `restart` operation on a single
        /// scale set VM.
        pub fn restart(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> restart::Builder {
            restart::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Creates a request builder for the `start` operation on a single
        /// scale set VM.
        pub fn start(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> start::Builder {
            start::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Creates a request builder for the `redeploy` operation on a
        /// single scale set VM.
        pub fn redeploy(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> redeploy::Builder {
            redeploy::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Creates a request builder for retrieving boot diagnostics data of
        /// a single scale set VM. The SAS URI expiration can be set on the
        /// returned builder.
        pub fn retrieve_boot_diagnostics_data(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> retrieve_boot_diagnostics_data::Builder {
            retrieve_boot_diagnostics_data::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                subscription_id: subscription_id.into(),
                sas_uri_expiration_time_in_minutes: None,
            }
        }
        /// Creates a request builder for the `performMaintenance` operation
        /// on a single scale set VM.
        pub fn perform_maintenance(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> perform_maintenance::Builder {
            perform_maintenance::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Creates a request builder for the `simulateEviction` operation on
        /// a single scale set VM.
        pub fn simulate_eviction(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> simulate_eviction::Builder {
            simulate_eviction::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Creates a request builder that runs a command (`runCommand`) on a
        /// single scale set VM with the given `parameters`.
        pub fn run_command(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            parameters: impl Into<models::RunCommandInput>,
            subscription_id: impl Into<String>,
        ) -> run_command::Builder {
            run_command::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                parameters: parameters.into(),
                subscription_id: subscription_id.into(),
            }
        }
    }
    pub mod reimage {
        use super::models;
        /// Success responses: the service accepts the reimage with 200 or 202.
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
        }
        /// Failure modes for the `reimage` operation.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::StreamError),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Request builder for `POST .../virtualmachines/{instanceId}/reimage`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_group_name: String,
            pub(crate) vm_scale_set_name: String,
            pub(crate) instance_id: String,
            pub(crate) subscription_id: String,
            // Optional request body; when `None`, the POST is sent with an empty body.
            pub(crate) vm_scale_set_vm_reimage_input: Option<models::VirtualMachineScaleSetVmReimageParameters>,
        }
        impl Builder {
            /// Sets the optional reimage parameters sent as the JSON request body.
            pub fn vm_scale_set_vm_reimage_input(
                mut self,
                vm_scale_set_vm_reimage_input: impl Into<models::VirtualMachineScaleSetVmReimageParameters>,
            ) -> Self {
                self.vm_scale_set_vm_reimage_input = Some(vm_scale_set_vm_reimage_input.into());
                self
            }
            /// Builds and sends the POST request, mapping 200/202 to [`Response`]
            /// and any other status to [`Error::UnexpectedResponse`].
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
                Box::pin(async move {
                    // URL assembled from endpoint + path parameters (generator spacing).
                    let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/reimage" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id) ;
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::POST);
                    // Acquire a bearer token for the client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                    // Serialize the optional parameters as JSON; otherwise send empty.
                    let req_body = if let Some(vm_scale_set_vm_reimage_input) = &self.vm_scale_set_vm_reimage_input {
                        req_builder = req_builder.header("content-type", "application/json");
                        azure_core::to_json(vm_scale_set_vm_reimage_input).map_err(Error::Serialize)?
                    } else {
                        azure_core::EMPTY_BODY
                    };
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    // `rsp_headers` is unused; part of the deconstructed tuple.
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => Ok(Response::Ok200),
                        http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            Err(Error::UnexpectedResponse {
                                status_code,
                                body: rsp_body,
                            })
                        }
                    }
                })
            }
        }
    }
pub mod reimage_all {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) instance_id: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/reimageall" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod deallocate {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) instance_id: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/deallocate" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
    pub mod get {
        use super::models;
        /// Failure modes for fetching a single scale set VM.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::StreamError),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Request builder for `GET .../virtualmachines/{instanceId}`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_group_name: String,
            pub(crate) vm_scale_set_name: String,
            pub(crate) instance_id: String,
            pub(crate) subscription_id: String,
            // Optional `$expand` query parameter; omitted from the URL when `None`.
            pub(crate) expand: Option<String>,
        }
        impl Builder {
            /// Sets the `$expand` OData query parameter.
            pub fn expand(mut self, expand: impl Into<String>) -> Self {
                self.expand = Some(expand.into());
                self
            }
            /// Builds and sends the GET request, deserializing the VM on
            /// HTTP 200; any other status becomes [`Error::UnexpectedResponse`].
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineScaleSetVm, Error>> {
                Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vm_scale_set_name,
                        &self.instance_id
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::GET);
                    // Acquire a bearer token for the client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                    if let Some(expand) = &self.expand {
                        url.query_pairs_mut().append_pair("$expand", expand);
                    }
                    // GET carries no body.
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    // `rsp_headers` is unused; part of the deconstructed tuple.
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::VirtualMachineScaleSetVm =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            Err(Error::UnexpectedResponse {
                                status_code,
                                body: rsp_body,
                            })
                        }
                    }
                })
            }
        }
    }
    pub mod update {
        use super::models;
        /// Success responses; both 200 and 202 carry the updated VM body.
        #[derive(Debug)]
        pub enum Response {
            Ok200(models::VirtualMachineScaleSetVm),
            Accepted202(models::VirtualMachineScaleSetVm),
        }
        /// Failure modes for the `update` (PUT) operation.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::StreamError),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Request builder for `PUT .../virtualmachines/{instanceId}`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_group_name: String,
            pub(crate) vm_scale_set_name: String,
            pub(crate) instance_id: String,
            // Required JSON request body: the desired VM state.
            pub(crate) parameters: models::VirtualMachineScaleSetVm,
            pub(crate) subscription_id: String,
        }
        impl Builder {
            /// Builds and sends the PUT request; 200/202 both deserialize
            /// the returned VM, any other status becomes
            /// [`Error::UnexpectedResponse`].
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
                Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vm_scale_set_name,
                        &self.instance_id
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::PUT);
                    // Acquire a bearer token for the client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                    // Serialize the VM parameters as the JSON request body.
                    req_builder = req_builder.header("content-type", "application/json");
                    let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    // `rsp_headers` is unused; part of the deconstructed tuple.
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::VirtualMachineScaleSetVm =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(Response::Ok200(rsp_value))
                        }
                        http::StatusCode::ACCEPTED => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::VirtualMachineScaleSetVm =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(Response::Accepted202(rsp_value))
                        }
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            Err(Error::UnexpectedResponse {
                                status_code,
                                body: rsp_body,
                            })
                        }
                    }
                })
            }
        }
    }
    pub mod delete {
        use super::models;
        /// Success outcomes of the `delete` operation.
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        /// Failure modes for the `delete` operation.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::StreamError),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Request builder for `DELETE .../virtualmachines/{instanceId}`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_group_name: String,
            pub(crate) vm_scale_set_name: String,
            pub(crate) instance_id: String,
            pub(crate) subscription_id: String,
            // Optional `forceDeletion` query parameter; omitted when `None`.
            pub(crate) force_deletion: Option<bool>,
        }
        impl Builder {
            /// Sets the `forceDeletion` query parameter.
            pub fn force_deletion(mut self, force_deletion: bool) -> Self {
                self.force_deletion = Some(force_deletion);
                self
            }
            /// Builds and sends the DELETE request, mapping 200/202/204 to
            /// [`Response`]; any other status becomes
            /// [`Error::UnexpectedResponse`].
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
                Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vm_scale_set_name,
                        &self.instance_id
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::DELETE);
                    // Acquire a bearer token for the client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                    if let Some(force_deletion) = &self.force_deletion {
                        url.query_pairs_mut().append_pair("forceDeletion", &force_deletion.to_string());
                    }
                    // DELETE carries no body.
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    // `rsp_headers` is unused; part of the deconstructed tuple.
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => Ok(Response::Ok200),
                        http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
                        http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            Err(Error::UnexpectedResponse {
                                status_code,
                                body: rsp_body,
                            })
                        }
                    }
                })
            }
        }
    }
pub mod get_instance_view {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) instance_id: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineScaleSetVmInstanceView, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/instanceView" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineScaleSetVmInstanceView =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
    pub mod list {
        use super::models;
        /// Failure modes for listing the VMs of a scale set.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::StreamError),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Request builder for `GET .../virtualMachineScaleSets/{name}/virtualMachines`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_group_name: String,
            pub(crate) virtual_machine_scale_set_name: String,
            pub(crate) subscription_id: String,
            // Optional OData query parameters; each is omitted from the URL when `None`.
            pub(crate) filter: Option<String>,
            pub(crate) select: Option<String>,
            pub(crate) expand: Option<String>,
        }
        impl Builder {
            /// Sets the `$filter` OData query parameter.
            pub fn filter(mut self, filter: impl Into<String>) -> Self {
                self.filter = Some(filter.into());
                self
            }
            /// Sets the `$select` OData query parameter.
            pub fn select(mut self, select: impl Into<String>) -> Self {
                self.select = Some(select.into());
                self
            }
            /// Sets the `$expand` OData query parameter.
            pub fn expand(mut self, expand: impl Into<String>) -> Self {
                self.expand = Some(expand.into());
                self
            }
            /// Builds and sends the GET request, deserializing the VM list on
            /// HTTP 200; any other status becomes [`Error::UnexpectedResponse`].
            pub fn into_future(
                self,
            ) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineScaleSetVmListResult, Error>> {
                Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualMachines",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.virtual_machine_scale_set_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::GET);
                    // Acquire a bearer token for the client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                    if let Some(filter) = &self.filter {
                        url.query_pairs_mut().append_pair("$filter", filter);
                    }
                    if let Some(select) = &self.select {
                        url.query_pairs_mut().append_pair("$select", select);
                    }
                    if let Some(expand) = &self.expand {
                        url.query_pairs_mut().append_pair("$expand", expand);
                    }
                    // GET carries no body.
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    // `rsp_headers` is unused; part of the deconstructed tuple.
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::VirtualMachineScaleSetVmListResult =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            Err(Error::UnexpectedResponse {
                                status_code,
                                body: rsp_body,
                            })
                        }
                    }
                })
            }
        }
    }
pub mod power_off {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) instance_id: String,
pub(crate) subscription_id: String,
pub(crate) skip_shutdown: Option<bool>,
}
impl Builder {
pub fn skip_shutdown(mut self, skip_shutdown: bool) -> Self {
self.skip_shutdown = Some(skip_shutdown);
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/poweroff" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(skip_shutdown) = &self.skip_shutdown {
url.query_pairs_mut().append_pair("skipShutdown", &skip_shutdown.to_string());
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod restart {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) instance_id: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/restart" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod start {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) instance_id: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/start" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod redeploy {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) instance_id: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/redeploy" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod retrieve_boot_diagnostics_data {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) instance_id: String,
pub(crate) subscription_id: String,
pub(crate) sas_uri_expiration_time_in_minutes: Option<i32>,
}
impl Builder {
pub fn sas_uri_expiration_time_in_minutes(mut self, sas_uri_expiration_time_in_minutes: i32) -> Self {
self.sas_uri_expiration_time_in_minutes = Some(sas_uri_expiration_time_in_minutes);
self
}
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::RetrieveBootDiagnosticsDataResult, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/retrieveBootDiagnosticsData" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(sas_uri_expiration_time_in_minutes) = &self.sas_uri_expiration_time_in_minutes {
url.query_pairs_mut()
.append_pair("sasUriExpirationTimeInMinutes", &sas_uri_expiration_time_in_minutes.to_string());
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RetrieveBootDiagnosticsDataResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod perform_maintenance {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) instance_id: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/performMaintenance" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod simulate_eviction {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) instance_id: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualMachines/{}/simulateEviction" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::NO_CONTENT => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod run_command {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::RunCommandResult),
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) instance_id: String,
pub(crate) parameters: models::RunCommandInput,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/runCommand" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RunCommandResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod log_analytics {
use super::models;
/// Operation-group client for the log-analytics export operations.
pub struct Client(pub(crate) super::Client);
impl Client {
    /// Creates a builder for the `getRequestRateByInterval` export operation.
    pub fn export_request_rate_by_interval(
        &self,
        parameters: impl Into<models::RequestRateByIntervalInput>,
        location: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> export_request_rate_by_interval::Builder {
        let client = self.0.clone();
        let parameters = parameters.into();
        let location = location.into();
        let subscription_id = subscription_id.into();
        export_request_rate_by_interval::Builder {
            client,
            parameters,
            location,
            subscription_id,
        }
    }
    /// Creates a builder for the `getThrottledRequests` export operation.
    pub fn export_throttled_requests(
        &self,
        parameters: impl Into<models::ThrottledRequestsInput>,
        location: impl Into<String>,
        subscription_id: impl Into<String>,
    ) -> export_throttled_requests::Builder {
        let client = self.0.clone();
        let parameters = parameters.into();
        let location = location.into();
        let subscription_id = subscription_id.into();
        export_throttled_requests::Builder {
            client,
            parameters,
            location,
            subscription_id,
        }
    }
}
pub mod export_request_rate_by_interval {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::LogAnalyticsOperationResult),
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) parameters: models::RequestRateByIntervalInput,
pub(crate) location: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/logAnalytics/apiAccess/getRequestRateByInterval",
self.client.endpoint(),
&self.subscription_id,
&self.location
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::LogAnalyticsOperationResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod export_throttled_requests {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::LogAnalyticsOperationResult),
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) parameters: models::ThrottledRequestsInput,
pub(crate) location: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/logAnalytics/apiAccess/getThrottledRequests",
self.client.endpoint(),
&self.subscription_id,
&self.location
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::LogAnalyticsOperationResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod virtual_machine_run_commands {
use super::models;
/// Operation-group client for the virtual-machine run-command operations;
/// wraps the shared service client.
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(&self, location: impl Into<String>, subscription_id: impl Into<String>) -> list::Builder {
list::Builder {
client: self.0.clone(),
location: location.into(),
subscription_id: subscription_id.into(),
}
}
pub fn get(&self, location: impl Into<String>, command_id: impl Into<String>, subscription_id: impl Into<String>) -> get::Builder {
get::Builder {
client: self.0.clone(),
location: location.into(),
command_id: command_id.into(),
subscription_id: subscription_id.into(),
}
}
pub fn get_by_virtual_machine(
&self,
resource_group_name: impl Into<String>,
vm_name: impl Into<String>,
run_command_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> get_by_virtual_machine::Builder {
get_by_virtual_machine::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
vm_name: vm_name.into(),
run_command_name: run_command_name.into(),
subscription_id: subscription_id.into(),
expand: None,
}
}
pub fn create_or_update(
&self,
resource_group_name: impl Into<String>,
vm_name: impl Into<String>,
run_command_name: impl Into<String>,
run_command: impl Into<models::VirtualMachineRunCommand>,
subscription_id: impl Into<String>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
vm_name: vm_name.into(),
run_command_name: run_command_name.into(),
run_command: run_command.into(),
subscription_id: subscription_id.into(),
}
}
pub fn update(
&self,
resource_group_name: impl Into<String>,
vm_name: impl Into<String>,
run_command_name: impl Into<String>,
run_command: impl Into<models::VirtualMachineRunCommandUpdate>,
subscription_id: impl Into<String>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
vm_name: vm_name.into(),
run_command_name: run_command_name.into(),
run_command: run_command.into(),
subscription_id: subscription_id.into(),
}
}
pub fn delete(
&self,
resource_group_name: impl Into<String>,
vm_name: impl Into<String>,
run_command_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
vm_name: vm_name.into(),
run_command_name: run_command_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn list_by_virtual_machine(
&self,
resource_group_name: impl Into<String>,
vm_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> list_by_virtual_machine::Builder {
list_by_virtual_machine::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
vm_name: vm_name.into(),
subscription_id: subscription_id.into(),
expand: None,
}
}
}
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::RunCommandListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/runCommands",
self.client.endpoint(),
&self.subscription_id,
&self.location
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RunCommandListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location: String,
pub(crate) command_id: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::RunCommandDocument, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/runCommands/{}",
self.client.endpoint(),
&self.subscription_id,
&self.location,
&self.command_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RunCommandDocument =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod get_by_virtual_machine {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_name: String,
pub(crate) run_command_name: String,
pub(crate) subscription_id: String,
pub(crate) expand: Option<String>,
}
impl Builder {
pub fn expand(mut self, expand: impl Into<String>) -> Self {
self.expand = Some(expand.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineRunCommand, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/runCommands/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_name,
&self.run_command_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(expand) = &self.expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineRunCommand =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::VirtualMachineRunCommand),
Created201(models::VirtualMachineRunCommand),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_name: String,
pub(crate) run_command_name: String,
pub(crate) run_command: models::VirtualMachineRunCommand,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/runCommands/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_name,
&self.run_command_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.run_command).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineRunCommand =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineRunCommand =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod update {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_name: String,
pub(crate) run_command_name: String,
pub(crate) run_command: models::VirtualMachineRunCommandUpdate,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineRunCommand, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/runCommands/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_name,
&self.run_command_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.run_command).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineRunCommand =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_name: String,
pub(crate) run_command_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/runCommands/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_name,
&self.run_command_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_by_virtual_machine {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_name: String,
pub(crate) subscription_id: String,
pub(crate) expand: Option<String>,
}
impl Builder {
pub fn expand(mut self, expand: impl Into<String>) -> Self {
self.expand = Some(expand.into());
self
}
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineRunCommandsListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/runCommands",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vm_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(expand) = &self.expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineRunCommandsListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod virtual_machine_scale_set_vm_run_commands {
use super::models;
    // Thin wrapper over the shared service client; each method collects its
    // parameters into the matching operation's `Builder`.
    pub struct Client(pub(crate) super::Client);
    impl Client {
        /// Creates a builder for the `get` operation on a scale-set VM run
        /// command; `$expand` defaults to unset and can be added on the builder.
        pub fn get(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            run_command_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> get::Builder {
            get::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                run_command_name: run_command_name.into(),
                subscription_id: subscription_id.into(),
                expand: None,
            }
        }
        /// Creates a builder for the `create_or_update` operation (PUT of a
        /// `VirtualMachineRunCommand` on a scale-set VM instance).
        pub fn create_or_update(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            run_command_name: impl Into<String>,
            run_command: impl Into<models::VirtualMachineRunCommand>,
            subscription_id: impl Into<String>,
        ) -> create_or_update::Builder {
            create_or_update::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                run_command_name: run_command_name.into(),
                run_command: run_command.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Creates a builder for the `update` operation (PATCH with a
        /// `VirtualMachineRunCommandUpdate` payload).
        pub fn update(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            run_command_name: impl Into<String>,
            run_command: impl Into<models::VirtualMachineRunCommandUpdate>,
            subscription_id: impl Into<String>,
        ) -> update::Builder {
            update::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                run_command_name: run_command_name.into(),
                run_command: run_command.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Creates a builder for the `delete` operation.
        pub fn delete(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            run_command_name: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> delete::Builder {
            delete::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                run_command_name: run_command_name.into(),
                subscription_id: subscription_id.into(),
            }
        }
        /// Creates a builder for the `list` operation over a scale-set VM
        /// instance's run commands; `$expand` defaults to unset.
        pub fn list(
            &self,
            resource_group_name: impl Into<String>,
            vm_scale_set_name: impl Into<String>,
            instance_id: impl Into<String>,
            subscription_id: impl Into<String>,
        ) -> list::Builder {
            list::Builder {
                client: self.0.clone(),
                resource_group_name: resource_group_name.into(),
                vm_scale_set_name: vm_scale_set_name.into(),
                instance_id: instance_id.into(),
                subscription_id: subscription_id.into(),
                expand: None,
            }
        }
    }
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) instance_id: String,
pub(crate) run_command_name: String,
pub(crate) subscription_id: String,
pub(crate) expand: Option<String>,
}
impl Builder {
pub fn expand(mut self, expand: impl Into<String>) -> Self {
self.expand = Some(expand.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineRunCommand, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualMachines/{}/runCommands/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id , & self . run_command_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
if let Some(expand) = &self.expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VirtualMachineRunCommand =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::VirtualMachineRunCommand),
Created201(models::VirtualMachineRunCommand),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) vm_scale_set_name: String,
pub(crate) instance_id: String,
pub(crate) run_command_name: String,
pub(crate) run_command: models::VirtualMachineRunCommand,
pub(crate) subscription_id: String,
}
impl Builder {
    /// Sends the PUT (create-or-update) request.
    ///
    /// Resolves to `Response::Ok200` when an existing run command was updated
    /// and `Response::Created201` when a new one was created; any other status
    /// is decoded as `models::CloudError` and returned through
    /// `Error::DefaultResponse`.
    pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
        Box::pin(async move {
            // ARM resource URL assembled from the builder's path parameters.
            let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualMachines/{}/runCommands/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id , & self . run_command_name) ;
            let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
            let mut req_builder = http::request::Builder::new();
            req_builder = req_builder.method(http::Method::PUT);
            // Bearer token for the client's configured scopes.
            let credential = self.client.token_credential();
            let token_response = credential
                .get_token(&self.client.scopes().join(" "))
                .await
                .map_err(Error::GetToken)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
            url.query_pairs_mut().append_pair("api-version", "2021-03-01");
            req_builder = req_builder.header("content-type", "application/json");
            // JSON-encode the run command as the request body.
            let req_body = azure_core::to_json(&self.run_command).map_err(Error::Serialize)?;
            req_builder = req_builder.uri(url.as_str());
            let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
            let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
            // NOTE(review): `rsp_headers` is never read below — presumably
            // generator output kept for uniformity; confirm before removing.
            let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
            match rsp_status {
                http::StatusCode::OK => {
                    let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                    let rsp_value: models::VirtualMachineRunCommand =
                        serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                    Ok(Response::Ok200(rsp_value))
                }
                http::StatusCode::CREATED => {
                    let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                    let rsp_value: models::VirtualMachineRunCommand =
                        serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                    Ok(Response::Created201(rsp_value))
                }
                // Every other status carries a CloudError payload.
                status_code => {
                    let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                    let rsp_value: models::CloudError =
                        serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                    Err(Error::DefaultResponse {
                        status_code,
                        value: rsp_value,
                    })
                }
            }
        })
    }
}
}
// Client for the PATCH (update) operation on a VM scale-set VM run command.
pub mod update {
    use super::models;
    // One variant per pipeline stage that can fail; the service's own error
    // payload surfaces as `DefaultResponse`.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::StreamError),
        // The raw body is kept so a decode failure can be diagnosed.
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    // Path/body parameters for the PATCH request; `run_command` is the
    // partial-update payload.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_group_name: String,
        pub(crate) vm_scale_set_name: String,
        pub(crate) instance_id: String,
        pub(crate) run_command_name: String,
        pub(crate) run_command: models::VirtualMachineRunCommandUpdate,
        pub(crate) subscription_id: String,
    }
    impl Builder {
        /// Sends the PATCH request; only 200 OK is treated as success, every
        /// other status is decoded as a `models::CloudError`.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineRunCommand, Error>> {
            Box::pin(async move {
                // ARM resource URL assembled from the builder's path parameters.
                let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualMachines/{}/runCommands/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id , & self . run_command_name) ;
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::PATCH);
                // Bearer token for the client's configured scopes.
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                req_builder = req_builder.header("content-type", "application/json");
                let req_body = azure_core::to_json(&self.run_command).map_err(Error::Serialize)?;
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // NOTE(review): `rsp_headers` is never read below.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::VirtualMachineRunCommand =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Ok(rsp_value)
                    }
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::CloudError =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Err(Error::DefaultResponse {
                            status_code,
                            value: rsp_value,
                        })
                    }
                }
            })
        }
    }
}
// Client for the DELETE operation on a VM scale-set VM run command.
pub mod delete {
    use super::models;
    // DELETE succeeds with any of three statuses; 202 means the deletion was
    // accepted and continues asynchronously.
    #[derive(Debug)]
    pub enum Response {
        Ok200,
        Accepted202,
        NoContent204,
    }
    // One variant per pipeline stage that can fail.
    // NOTE(review): `Serialize` is unreachable here since DELETE sends an
    // empty body — presumably generator boilerplate shared across operations.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::StreamError),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    // Path parameters identifying the run command to delete.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_group_name: String,
        pub(crate) vm_scale_set_name: String,
        pub(crate) instance_id: String,
        pub(crate) run_command_name: String,
        pub(crate) subscription_id: String,
    }
    impl Builder {
        /// Sends the DELETE request and maps 200/202/204 to the matching
        /// `Response` variant; other statuses are decoded as `CloudError`.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
            Box::pin(async move {
                // ARM resource URL assembled from the builder's path parameters.
                let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualMachines/{}/runCommands/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id , & self . run_command_name) ;
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::DELETE);
                // Bearer token for the client's configured scopes.
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                // DELETE carries no body.
                let req_body = azure_core::EMPTY_BODY;
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // NOTE(review): `rsp_headers` is never read below.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => Ok(Response::Ok200),
                    http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
                    http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::CloudError =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Err(Error::DefaultResponse {
                            status_code,
                            value: rsp_value,
                        })
                    }
                }
            })
        }
    }
}
// Client for the GET (list) operation enumerating run commands of a
// VM scale-set VM.
pub mod list {
    use super::models;
    // One variant per pipeline stage that can fail.
    // NOTE(review): `Serialize` is unreachable here since GET sends an empty
    // body — presumably generator boilerplate shared across operations.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::StreamError),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    // Path parameters plus the optional `$expand` query parameter.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_group_name: String,
        pub(crate) vm_scale_set_name: String,
        pub(crate) instance_id: String,
        pub(crate) subscription_id: String,
        pub(crate) expand: Option<String>,
    }
    impl Builder {
        /// Sets the `$expand` query parameter (optional).
        pub fn expand(mut self, expand: impl Into<String>) -> Self {
            self.expand = Some(expand.into());
            self
        }
        /// Sends the GET request; 200 OK yields the list result, every other
        /// status is decoded as a `models::CloudError`.
        pub fn into_future(
            self,
        ) -> futures::future::BoxFuture<'static, std::result::Result<models::VirtualMachineRunCommandsListResult, Error>> {
            Box::pin(async move {
                // ARM collection URL assembled from the builder's path parameters.
                let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualMachines/{}/runCommands" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_scale_set_name , & self . instance_id) ;
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::GET);
                // Bearer token for the client's configured scopes.
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2021-03-01");
                // `$expand` is only appended when the caller set it.
                if let Some(expand) = &self.expand {
                    url.query_pairs_mut().append_pair("$expand", expand);
                }
                let req_body = azure_core::EMPTY_BODY;
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // NOTE(review): `rsp_headers` is never read below.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::VirtualMachineRunCommandsListResult =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Ok(rsp_value)
                    }
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::CloudError =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Err(Error::DefaultResponse {
                            status_code,
                            value: rsp_value,
                        })
                    }
                }
            })
        }
    }
}
}
| 51.762922 | 366 | 0.524488 |
7289f48343d68c4dd072944a7909c462b7a1d4dc | 5,276 | use std::fs;
use std::io::Write;
use std::path::{Path, PathBuf};
use failure::Error;
use proc_macro2::TokenTree;
use syn;
use syn::spanned::Spanned;
// Location of one inline snapshot string literal in the source file, stored
// as 0-based (line, column) positions of the literal's first and last
// characters.
#[derive(Debug)]
pub struct InlineSnapshot {
    start: (usize, usize),
    end: (usize, usize),
}
// In-memory editor for a Rust source file: the parsed AST (`source`) is used
// to locate snapshot macros, while the actual edits are applied to the raw
// `lines` buffer and written back with `save`.
#[derive(Debug)]
pub struct FilePatcher {
    filename: PathBuf,
    lines: Vec<String>,    // file content split into lines, terminators stripped
    newline: &'static str, // line terminator detected on open ("\n" or "\r\n")
    source: syn::File,
    inline_snapshots: Vec<InlineSnapshot>, // registered snapshots, in file order
}
impl FilePatcher {
    /// Reads and parses the file at `p`; fails if it is unreadable or not
    /// valid Rust.
    ///
    /// The line terminator of the first line is sniffed so `save` can write
    /// the file back in its original newline style.
    pub fn open<P: AsRef<Path>>(p: P) -> Result<FilePatcher, Error> {
        let filename = p.as_ref().to_path_buf();
        let contents = fs::read_to_string(p)?;
        let source = syn::parse_file(&contents)?;
        let mut line_iter = contents.lines().peekable();
        let newline = if let Some(line) = line_iter.peek() {
            // `str::lines` strips the terminator, so for a CRLF file the byte
            // at index `line.len()` is the b'\r'. (Bug fix: this previously
            // inspected `line.len() + 1`, which points past the '\r' and
            // therefore never detected CRLF.)
            match contents.as_bytes().get(line.len()) {
                Some(b'\r') => &"\r\n",
                _ => &"\n",
            }
        } else {
            &"\n"
        };
        let lines: Vec<String> = line_iter.map(|x| x.into()).collect();
        Ok(FilePatcher {
            filename,
            source,
            newline,
            lines,
            inline_snapshots: vec![],
        })
    }
    /// Writes the (possibly patched) lines back to the original file, using
    /// the newline style detected by `open`.
    pub fn save(&self) -> Result<(), Error> {
        let mut f = fs::File::create(&self.filename)?;
        for line in &self.lines {
            // Bug fix: `writeln!` always emits '\n'; use the detected
            // terminator so CRLF files are not silently rewritten as LF.
            write!(&mut f, "{}{}", line, self.newline)?;
        }
        Ok(())
    }
    /// Registers the inline snapshot belonging to the snapshot-assertion
    /// macro whose invocation starts on 1-based source `line`.
    ///
    /// # Panics
    /// Panics if no such macro exists on that line, or if snapshots are
    /// registered out of file order.
    pub fn add_snapshot_macro(&mut self, line: usize) {
        match self.find_snapshot_macro(line) {
            Some(snapshot) => {
                // Snapshots must be added top-to-bottom: `set_new_content`
                // only shifts the positions of snapshots registered *after*
                // the one being edited.
                assert!(self
                    .inline_snapshots
                    .last()
                    .map_or(true, |x| x.end.0 <= line));
                self.inline_snapshots.push(snapshot)
            }
            None => panic!("Could not find snapshot in line {}", line),
        }
    }
    /// Returns the current 1-based start line of snapshot `id`, accounting
    /// for line shifts caused by earlier edits.
    pub fn get_new_line(&self, id: usize) -> usize {
        self.inline_snapshots[id].start.0 + 1
    }
    /// Replaces the string literal of snapshot `id` with `snapshot`,
    /// re-quoting it as a plain or raw string as needed, and shifts the
    /// recorded positions of all later snapshots by the line-count delta.
    pub fn set_new_content(&mut self, id: usize, snapshot: &str) {
        let inline = &mut self.inline_snapshots[id];
        let old_lines = inline.end.0 - inline.start.0 + 1;
        // Everything on the first/last line outside the literal itself must
        // be preserved around the new content.
        let prefix: String = self.lines[inline.start.0]
            .chars()
            .take(inline.start.1)
            .collect();
        let suffix: String = self.lines[inline.end.0]
            .chars()
            .skip(inline.end.1)
            .collect();
        let mut new_lines: Vec<_> = snapshot.lines().collect();
        if new_lines.is_empty() {
            new_lines.push("");
        }
        // Multi-line content, or content with quotes/backslashes, needs a raw
        // string literal; otherwise a plain quoted string suffices.
        let (quote_start, quote_end) =
            if new_lines.len() > 1 || new_lines[0].contains(&['\\', '"'][..]) {
                ("r###\"", "\"###")
            } else {
                ("\"", "\"")
            };
        let line_count_diff = new_lines.len() as i64 - old_lines as i64;
        self.lines.splice(
            inline.start.0..=inline.end.0,
            new_lines.iter().enumerate().map(|(idx, line)| {
                let mut rv = String::new();
                if idx == 0 {
                    rv.push_str(&prefix);
                    rv.push_str(quote_start);
                }
                rv.push_str(&line);
                if idx + 1 == new_lines.len() {
                    rv.push_str(quote_end);
                    rv.push_str(&suffix);
                }
                rv
            }),
        );
        // Shift every snapshot at or after the edited one (the edited one's
        // own start is unchanged; its end moves with the delta).
        for inl in &mut self.inline_snapshots[id..] {
            inl.start.0 = (inl.start.0 as i64 + line_count_diff) as usize;
            inl.end.0 = (inl.end.0 as i64 + line_count_diff) as usize;
        }
    }
    /// Walks the AST for a macro invocation starting on 1-based `line` whose
    /// name looks like `assert_*_snapshot_matches` and whose last arguments
    /// are `@ "literal"`; returns the literal's span as an `InlineSnapshot`.
    fn find_snapshot_macro(&self, line: usize) -> Option<InlineSnapshot> {
        struct Visitor(usize, Option<InlineSnapshot>);
        impl<'ast> syn::visit::Visit<'ast> for Visitor {
            fn visit_macro(&mut self, i: &'ast syn::Macro) {
                // Only consider macros that start on the requested line.
                if i.span().start().line != self.0 || i.path.segments.is_empty() {
                    return;
                }
                let last = i.path.segments[i.path.segments.len() - 1].ident.to_string();
                if !last.starts_with("assert_") || !last.ends_with("_snapshot_matches") {
                    return;
                }
                // The inline form ends with `... @ <literal>`.
                let tokens: Vec<_> = i.tts.clone().into_iter().collect();
                if tokens.len() < 2 {
                    return;
                }
                match &tokens[tokens.len() - 2] {
                    TokenTree::Punct(ref punct) if punct.as_char() == '@' => {}
                    _ => return,
                }
                let (start, end) = match &tokens[tokens.len() - 1] {
                    TokenTree::Literal(lit) => {
                        let span = lit.span();
                        // Spans are 1-based lines; store 0-based for indexing
                        // into `self.lines`.
                        (
                            (span.start().line - 1, span.start().column),
                            (span.end().line - 1, span.end().column),
                        )
                    }
                    _ => return,
                };
                self.1 = Some(InlineSnapshot { start, end });
            }
        }
        let mut visitor = Visitor(line, None);
        syn::visit::visit_file(&mut visitor, &self.source);
        visitor.1
    }
}
| 31.404762 | 89 | 0.46323 |
f8b9c9ae542d5e0a6cd99f584f01889d5b1adba6 | 4,205 | // Copyright 2015 blake2-rfc Developers
// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! The BLAKE2s hash function.
//!
//! # Examples
//!
//! ```
//! use blake2_rfc::blake2s::{Blake2s, blake2s};
//!
//! // Using the convenience function.
//! let hash = blake2s(32, &[], b"The quick brown fox jumps over the lazy dog");
//!
//! // Using the state context.
//! let mut context = Blake2s::new(32);
//! context.update(b"The quick brown fox jumps over the lazy dog");
//! let hash = context.finalize();
//!
//! // Using the convenience function, with a key.
//! let hash = blake2s(32, b"key", b"The quick brown fox jumps over the lazy dog");
//!
//! // Using the state context, with a key.
//! let mut context = Blake2s::with_key(32, b"key");
//! context.update(b"The quick brown fox jumps over the lazy dog");
//! let hash = context.finalize();
//! ```
//!
//! The returned hash is a `Blake2sResult`, which can be compared with
//! a byte string (the comparison will take constant time), or converted
//! into a byte string.
#![cfg_attr(feature = "cargo-clippy", allow(unreadable_literal))]
// Instantiate the shared BLAKE2 implementation for the 32-bit variant:
// u32 words, 32-byte maximum digest, the G-function rotation constants
// 16/12/8/7, and the BLAKE2s IV (identical to the SHA-256 IV) — all per
// RFC 7693. The remaining numeric arguments are consumed by the macro
// defined in the sibling `blake2` module.
blake2_impl!(
    Blake2s, Blake2sResult, blake2s, u32,
    u32x4, read_u32, 32, 16, 12, 8, 7, [
        0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
        0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
    ]);
// Generate the known-answer `selftest()` routine: it hashes sequences of the
// given lengths with the given digest sizes and checks the combined root
// hash against the 32-byte constant below (RFC 7693 self-test style).
blake2_selftest_impl!(Blake2s, blake2s, [
    0x6A, 0x41, 0x1F, 0x08, 0xCE, 0x25, 0xAD, 0xCD,
    0xFB, 0x02, 0xAB, 0xA6, 0x41, 0x45, 0x1C, 0xEC,
    0x53, 0xC5, 0x98, 0xB2, 0x4F, 0x4F, 0xC7, 0x87,
    0xFB, 0xDC, 0x88, 0x79, 0x7F, 0x4C, 0x1D, 0xFE,
], [ 16, 20, 28, 32 ], [ 0, 3, 64, 65, 255, 1024 ]);
#[cfg(test)]
mod tests {
    #![cfg_attr(feature = "cargo-clippy", allow(result_unwrap_used))]
    extern crate data_encoding;
    use self::data_encoding::HEXUPPER;
    use self::data_encoding::HEXLOWER;
    use blake2::selftest_seq;
    use super::{Blake2s, blake2s};
    // BLAKE2s-256 of the empty message (well-known test vector).
    #[test]
    fn test_empty() {
        assert_eq!(&blake2s(32, &[], b""), &HEXUPPER.decode(
            b"69217A3079908094E11121D042354A7C1F55B6482CA1A51E1B250DFD1ED0EEF9")
            .unwrap()[..]);
    }
    // `Default` must behave like an unkeyed 32-byte-output context on empty
    // input (same vector as above).
    #[test]
    fn test_default() {
        assert_eq!(&Blake2s::default().finalize(), &HEXUPPER.decode(
            b"69217A3079908094E11121D042354A7C1F55B6482CA1A51E1B250DFD1ED0EEF9")
            .unwrap()[..]);
    }
    // Keyed hash with a personalization string.
    // NOTE(review): the expected value's provenance isn't cited here —
    // presumably from a reference implementation; confirm before changing.
    #[test]
    fn test_persona() {
        let key_bytes = &HEXLOWER.decode(b"000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap();
        let persona = "personal";
        let persona_bytes = persona.as_bytes();
        let ctx = Blake2s::with_params(32, key_bytes, &[], persona_bytes);
        assert_eq!(&ctx.finalize(), &HEXLOWER.decode(b"25a4ee63b594aed3f88a971e1877ef7099534f9097291f88fb86c79b5e70d022").unwrap()[..]);
    }
    // Run the macro-generated known-answer self-test.
    #[test]
    fn selftest() {
        super::selftest();
    }
    // Hashing in arbitrary chunks must match the one-shot result.
    #[test]
    fn test_split() {
        let data = selftest_seq(256);
        let mut ctx = Blake2s::new(32);
        ctx.update(&data[..16]);
        ctx.update(&data[16..32]);
        ctx.update(&data[32..224]);
        ctx.update(&data[224..]);
        assert_eq!(&ctx.finalize(), &blake2s(32, &[], &data));
    }
    // The `io::Write` impl must feed data exactly like `update`.
    #[cfg(feature = "std")]
    #[test]
    fn test_write() {
        use std::io::prelude::*;
        let data = selftest_seq(1024);
        let mut ctx = Blake2s::new(32);
        ctx.update(&data[..]);
        let mut writer = Blake2s::new(32);
        writer.write_all(&data[..]).unwrap();
        assert_eq!(&writer.finalize(), &ctx.finalize());
    }
    // 4 GiB of zeros (1048576 * 4096 bytes): exercises the 32-bit byte
    // counter overflow path. Ignored in debug builds — far too slow.
    #[cfg_attr(debug_assertions, ignore)]
    #[test]
    fn test_4g() {
        const ZEROS: [u8; 4096] = [0; 4096];
        let mut state = Blake2s::new(32);
        for _ in 0..1048576 {
            state.update(&ZEROS);
        }
        assert_eq!(&state.finalize(), &HEXUPPER.decode(
            b"2A8E26830310DA3EF7F7032B7B1AF11B989ABA44A3713A22F539F69BD2CE4A87")
            .unwrap()[..]);
    }
}
| 31.148148 | 136 | 0.619501 |
62f9414aa9946ccc452fc170880f09630145785e | 3,946 | use super::{CompileNode, NodeId};
use crate::blocks::{Block, BlockPos};
use serde::Serialize;
use std::fs;
// Generates `impl From<$src> for $dst`, mapping each listed field-less
// variant of the source enum to the identically-named variant of the
// destination enum.
macro_rules! convert_enum {
    ($src:path, $dst:ident, $($variant:ident),*) => {
        impl From<$src> for $dst {
            fn from(src: $src) -> Self {
                match src {
                    $(<$src>::$variant => Self::$variant,)*
                }
            }
        }
    }
}
// Serializable mirror of `super::LinkType`, kept separate so the on-disk
// format does not depend on the compiler's internal enum.
#[derive(Serialize)]
enum LinkType {
    Default,
    Side,
}
convert_enum!(super::LinkType, LinkType, Default, Side);
// Serializable mirror of `crate::blocks::ComparatorMode`.
#[derive(Serialize)]
enum ComparatorMode {
    Compare,
    Subtract,
}
convert_enum!(
    crate::blocks::ComparatorMode,
    ComparatorMode,
    Compare,
    Subtract
);
// One serialized input edge of a node.
#[derive(Serialize)]
struct Link {
    pub ty: LinkType,
    pub weight: u8, // link weight — presumably signal attenuation; confirm against super::Link
    pub to: NodeId,
}
// Serializable node kind. `Repeater` carries its delay, `Comparator` its
// mode; all other redstone components are field-less.
#[derive(Serialize)]
enum NodeType {
    Repeater(u8),
    Comparator(ComparatorMode),
    Torch,
    StoneButton,
    StonePressurePlate,
    Lamp,
    Lever,
    Constant,
    Wire,
}
// Flat, serializable form of a compile-graph node as written by `debug`.
#[derive(Serialize)]
struct Node {
    pub ty: NodeType,
    pub inputs: Vec<Link>,
    // Nodes copied from `CompileNode::updates` — presumably the nodes this
    // one triggers updates on; confirm against the compiler.
    pub updates: Vec<NodeId>,
    pub facing_diode: bool,
    pub comparator_far_input: Option<u8>,
    pub output_power: u8,
    /// Comparator powered / Repeater locked
    pub diode_state: bool,
    pub pos: BlockPos,
}
pub fn debug(graph: &[CompileNode]) {
let mut nodes = Vec::new();
for node in graph {
let n = Node {
ty: match node.state {
Block::RedstoneRepeater { repeater } => NodeType::Repeater(repeater.delay),
Block::RedstoneComparator { comparator } => {
NodeType::Comparator(comparator.mode.into())
}
Block::RedstoneTorch { .. } => NodeType::Torch,
Block::RedstoneWallTorch { .. } => NodeType::Torch,
Block::StoneButton { .. } => NodeType::StoneButton,
Block::StonePressurePlate { .. } => NodeType::StonePressurePlate,
Block::RedstoneLamp { .. } => NodeType::Lamp,
Block::Lever { .. } => NodeType::Lever,
Block::RedstoneBlock { .. } => NodeType::Constant,
Block::RedstoneWire { .. } => NodeType::Wire,
block if block.has_comparator_override() => NodeType::Constant,
_ => continue,
},
inputs: node
.inputs
.iter()
.map(|l| Link {
ty: l.ty.into(),
to: l.end,
weight: l.weight,
})
.collect(),
updates: node.updates.clone(),
comparator_far_input: node.comparator_far_input,
diode_state: match node.state {
Block::RedstoneRepeater { repeater } => repeater.locked,
Block::RedstoneComparator { comparator } => comparator.powered,
_ => false,
},
facing_diode: node.facing_diode,
output_power: match node.state {
Block::RedstoneRepeater { repeater } => repeater.powered.then(|| 15).unwrap_or(0),
Block::RedstoneComparator { .. } => node.comparator_output,
Block::RedstoneTorch { lit } => lit.then(|| 15).unwrap_or(0),
Block::RedstoneWallTorch { lit, .. } => lit.then(|| 15).unwrap_or(0),
Block::Lever { lever } => lever.powered.then(|| 15).unwrap_or(0),
Block::StoneButton { button } => button.powered.then(|| 15).unwrap_or(0),
Block::StonePressurePlate { powered } => powered.then(|| 15).unwrap_or(0),
Block::RedstoneBlock {} => 15,
s if s.has_comparator_override() => node.comparator_output,
_ => 0,
},
pos: node.pos,
};
nodes.push(n);
}
fs::write("redpiler_graph.bc", bincode::serialize(&nodes).unwrap()).unwrap();
}
| 30.828125 | 98 | 0.528637 |
0aa7f3d323eeafa6f9fa1b473d633b171d22ef50 | 55,432 | use approx::{AbsDiffEq, RelativeEq, UlpsEq};
use num::Zero;
use std::fmt;
use std::hash::{Hash, Hasher};
#[cfg(feature = "serde-serialize-no-std")]
use crate::base::storage::Owned;
#[cfg(feature = "serde-serialize-no-std")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use simba::scalar::{ClosedNeg, RealField};
use simba::simd::{SimdBool, SimdOption, SimdRealField};
use crate::base::dimension::{U1, U3, U4};
use crate::base::storage::{CStride, RStride};
use crate::base::{
Matrix3, Matrix4, MatrixSlice, MatrixSliceMut, Normed, Scalar, Unit, Vector3, Vector4,
};
use crate::geometry::{Point3, Rotation};
/// A quaternion. See the type alias `UnitQuaternion = Unit<Quaternion>` for a quaternion
/// that may be used as a rotation.
// `repr(C)` fixes the layout to that of the single `coords` field; the
// bytemuck impls below rely on this.
#[repr(C)]
#[derive(Copy, Clone)]
#[cfg_attr(
    all(not(target_os = "cuda"), feature = "cuda"),
    derive(cust::DeviceCopy)
)]
pub struct Quaternion<T> {
    /// This quaternion as a 4D vector of coordinates in the `[ x, y, z, w ]` storage order.
    pub coords: Vector4<T>,
}
impl<T: fmt::Debug> fmt::Debug for Quaternion<T> {
    /// Formats the quaternion by delegating to the `Debug` output of its
    /// coordinate slice, i.e. `[x, y, z, w]`.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        let coords: &[T] = self.coords.as_slice();
        fmt::Debug::fmt(coords, formatter)
    }
}
impl<T: Scalar + Hash> Hash for Quaternion<T> {
    /// Hashes exactly like the underlying coordinate vector, so equal
    /// quaternions hash equally.
    fn hash<H: Hasher>(&self, state: &mut H) {
        Hash::hash(&self.coords, state)
    }
}
// Marker impl: equality is total whenever the component type itself is `Eq`.
impl<T: Scalar + Eq> Eq for Quaternion<T> {}
impl<T: Scalar> PartialEq for Quaternion<T> {
    /// Two quaternions compare equal iff their coordinate vectors are equal
    /// component-wise.
    #[inline]
    fn eq(&self, right: &Self) -> bool {
        self.coords.eq(&right.coords)
    }
}
impl<T: Scalar + Zero> Default for Quaternion<T> {
    /// The zero quaternion: all four coordinates are zero.
    fn default() -> Self {
        Self {
            coords: Vector4::zeros(),
        }
    }
}
// SAFETY: `Quaternion` is `repr(C)` with a single `Vector4<T>` field, so it
// is zeroable exactly when that vector is.
#[cfg(feature = "bytemuck")]
unsafe impl<T: Scalar> bytemuck::Zeroable for Quaternion<T> where Vector4<T>: bytemuck::Zeroable {}
// SAFETY: same single-field `repr(C)` layout argument; `Pod` additionally
// requires `T: Copy`, which is demanded by the bound below.
#[cfg(feature = "bytemuck")]
unsafe impl<T: Scalar> bytemuck::Pod for Quaternion<T>
where
    Vector4<T>: bytemuck::Pod,
    T: Copy,
{
}
#[cfg(feature = "serde-serialize-no-std")]
impl<T: Scalar> Serialize for Quaternion<T>
where
    Owned<T, U4>: Serialize,
{
    /// Serializes transparently as the coordinate vector, i.e. in the
    /// `[x, y, z, w]` storage order.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        self.coords.serialize(serializer)
    }
}
#[cfg(feature = "serde-serialize-no-std")]
impl<'a, T: Scalar> Deserialize<'a> for Quaternion<T>
where
    Owned<T, U4>: Deserialize<'a>,
{
    /// Deserializes from the `[x, y, z, w]` vector representation produced by
    /// the matching `Serialize` impl.
    fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
    where
        Des: Deserializer<'a>,
    {
        Vector4::<T>::deserialize(deserializer).map(Self::from)
    }
}
// Zero-copy (de)serialization support via rkyv: a quaternion archives as a
// quaternion of the archived component type, delegating all work to the
// inner `Vector4`.
#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
    use super::Quaternion;
    use crate::base::Vector4;
    use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize};
    impl<T: Archive> Archive for Quaternion<T> {
        type Archived = Quaternion<T::Archived>;
        type Resolver = <Vector4<T> as Archive>::Resolver;
        // Resolves the inner vector in place, at the offset of the `coords`
        // field inside the archived struct.
        fn resolve(
            &self,
            pos: usize,
            resolver: Self::Resolver,
            out: &mut core::mem::MaybeUninit<Self::Archived>,
        ) {
            self.coords.resolve(
                pos + offset_of!(Self::Archived, coords),
                resolver,
                project_struct!(out: Self::Archived => coords),
            );
        }
    }
    impl<T: Serialize<S>, S: Fallible + ?Sized> Serialize<S> for Quaternion<T> {
        fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
            self.coords.serialize(serializer)
        }
    }
    impl<T: Archive, D: Fallible + ?Sized> Deserialize<Quaternion<T>, D> for Quaternion<T::Archived>
    where
        T::Archived: Deserialize<T, D>,
    {
        fn deserialize(&self, deserializer: &mut D) -> Result<Quaternion<T>, D::Error> {
            Ok(Quaternion {
                coords: self.coords.deserialize(deserializer)?,
            })
        }
    }
}
impl<T: SimdRealField> Quaternion<T>
where
T::Element: SimdRealField,
{
/// Moves this unit quaternion into one that owns its data.
#[inline]
#[deprecated(note = "This method is a no-op and will be removed in a future release.")]
pub fn into_owned(self) -> Self {
self
}
/// Clones this unit quaternion into one that owns its data.
#[inline]
#[deprecated(note = "This method is a no-op and will be removed in a future release.")]
pub fn clone_owned(&self) -> Self {
Self::from(self.coords.clone_owned())
}
/// Normalizes this quaternion.
///
/// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::Quaternion;
/// let q = Quaternion::new(1.0, 2.0, 3.0, 4.0);
/// let q_normalized = q.normalize();
/// relative_eq!(q_normalized.norm(), 1.0);
/// ```
#[inline]
#[must_use = "Did you mean to use normalize_mut()?"]
pub fn normalize(&self) -> Self {
Self::from(self.coords.normalize())
}
/// The imaginary part of this quaternion.
#[inline]
#[must_use]
pub fn imag(&self) -> Vector3<T> {
self.coords.xyz()
}
/// The conjugate of this quaternion.
///
/// # Example
/// ```
/// # use nalgebra::Quaternion;
/// let q = Quaternion::new(1.0, 2.0, 3.0, 4.0);
/// let conj = q.conjugate();
/// assert!(conj.i == -2.0 && conj.j == -3.0 && conj.k == -4.0 && conj.w == 1.0);
/// ```
#[inline]
#[must_use = "Did you mean to use conjugate_mut()?"]
pub fn conjugate(&self) -> Self {
Self::from_parts(self.w.clone(), -self.imag())
}
/// Linear interpolation between two quaternion.
///
/// Computes `self * (1 - t) + other * t`.
///
/// # Example
/// ```
/// # use nalgebra::Quaternion;
/// let q1 = Quaternion::new(1.0, 2.0, 3.0, 4.0);
/// let q2 = Quaternion::new(10.0, 20.0, 30.0, 40.0);
///
/// assert_eq!(q1.lerp(&q2, 0.1), Quaternion::new(1.9, 3.8, 5.7, 7.6));
/// ```
#[inline]
#[must_use]
pub fn lerp(&self, other: &Self, t: T) -> Self {
self * (T::one() - t.clone()) + other * t
}
/// The vector part `(i, j, k)` of this quaternion.
///
/// # Example
/// ```
/// # use nalgebra::Quaternion;
/// let q = Quaternion::new(1.0, 2.0, 3.0, 4.0);
/// assert_eq!(q.vector()[0], 2.0);
/// assert_eq!(q.vector()[1], 3.0);
/// assert_eq!(q.vector()[2], 4.0);
/// ```
#[inline]
#[must_use]
pub fn vector(&self) -> MatrixSlice<'_, T, U3, U1, RStride<T, U4, U1>, CStride<T, U4, U1>> {
self.coords.fixed_rows::<3>(0)
}
/// The scalar part `w` of this quaternion.
///
/// # Example
/// ```
/// # use nalgebra::Quaternion;
/// let q = Quaternion::new(1.0, 2.0, 3.0, 4.0);
/// assert_eq!(q.scalar(), 1.0);
/// ```
#[inline]
#[must_use]
pub fn scalar(&self) -> T {
self.coords[3].clone()
}
/// Reinterprets this quaternion as a 4D vector.
///
/// # Example
/// ```
/// # use nalgebra::{Vector4, Quaternion};
/// let q = Quaternion::new(1.0, 2.0, 3.0, 4.0);
/// // Recall that the quaternion is stored internally as (i, j, k, w)
/// // while the crate::new constructor takes the arguments as (w, i, j, k).
/// assert_eq!(*q.as_vector(), Vector4::new(2.0, 3.0, 4.0, 1.0));
/// ```
#[inline]
#[must_use]
pub fn as_vector(&self) -> &Vector4<T> {
&self.coords
}
/// The norm of this quaternion.
///
/// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::Quaternion;
/// let q = Quaternion::new(1.0, 2.0, 3.0, 4.0);
/// assert_relative_eq!(q.norm(), 5.47722557, epsilon = 1.0e-6);
/// ```
#[inline]
#[must_use]
pub fn norm(&self) -> T {
self.coords.norm()
}
/// A synonym for the norm of this quaternion.
///
/// Aka the length.
/// This is the same as `.norm()`
///
/// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::Quaternion;
/// let q = Quaternion::new(1.0, 2.0, 3.0, 4.0);
/// assert_relative_eq!(q.magnitude(), 5.47722557, epsilon = 1.0e-6);
/// ```
#[inline]
#[must_use]
pub fn magnitude(&self) -> T {
self.norm()
}
/// The squared norm of this quaternion.
///
/// # Example
/// ```
/// # use nalgebra::Quaternion;
/// let q = Quaternion::new(1.0, 2.0, 3.0, 4.0);
/// assert_eq!(q.magnitude_squared(), 30.0);
/// ```
#[inline]
#[must_use]
pub fn norm_squared(&self) -> T {
self.coords.norm_squared()
}
/// A synonym for the squared norm of this quaternion.
///
/// Aka the squared length.
/// This is the same as `.norm_squared()`
///
/// # Example
/// ```
/// # use nalgebra::Quaternion;
/// let q = Quaternion::new(1.0, 2.0, 3.0, 4.0);
/// assert_eq!(q.magnitude_squared(), 30.0);
/// ```
#[inline]
#[must_use]
pub fn magnitude_squared(&self) -> T {
self.norm_squared()
}
/// The dot product of two quaternions.
///
/// # Example
/// ```
/// # use nalgebra::Quaternion;
/// let q1 = Quaternion::new(1.0, 2.0, 3.0, 4.0);
/// let q2 = Quaternion::new(5.0, 6.0, 7.0, 8.0);
/// assert_eq!(q1.dot(&q2), 70.0);
/// ```
#[inline]
#[must_use]
pub fn dot(&self, rhs: &Self) -> T {
self.coords.dot(&rhs.coords)
}
}
impl<T: SimdRealField> Quaternion<T>
where
T::Element: SimdRealField,
{
/// Inverts this quaternion if it is not zero.
///
/// This method also does not works with SIMD components (see `simd_try_inverse` instead).
///
/// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::Quaternion;
/// let q = Quaternion::new(1.0, 2.0, 3.0, 4.0);
/// let inv_q = q.try_inverse();
///
/// assert!(inv_q.is_some());
/// assert_relative_eq!(inv_q.unwrap() * q, Quaternion::identity());
///
/// //Non-invertible case
/// let q = Quaternion::new(0.0, 0.0, 0.0, 0.0);
/// let inv_q = q.try_inverse();
///
/// assert!(inv_q.is_none());
/// ```
#[inline]
#[must_use = "Did you mean to use try_inverse_mut()?"]
pub fn try_inverse(&self) -> Option<Self>
where
T: RealField,
{
let mut res = self.clone();
if res.try_inverse_mut() {
Some(res)
} else {
None
}
}
/// Attempt to inverse this quaternion.
///
/// This method also works with SIMD components.
#[inline]
#[must_use = "Did you mean to use try_inverse_mut()?"]
pub fn simd_try_inverse(&self) -> SimdOption<Self> {
let norm_squared = self.norm_squared();
let ge = norm_squared.clone().simd_ge(T::simd_default_epsilon());
SimdOption::new(self.conjugate() / norm_squared, ge)
}
/// Calculates the inner product (also known as the dot product).
/// See "Foundations of Game Engine Development, Volume 1: Mathematics" by Lengyel
/// Formula 4.89.
///
/// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::Quaternion;
/// let a = Quaternion::new(0.0, 2.0, 3.0, 4.0);
/// let b = Quaternion::new(0.0, 5.0, 2.0, 1.0);
/// let expected = Quaternion::new(-20.0, 0.0, 0.0, 0.0);
/// let result = a.inner(&b);
/// assert_relative_eq!(expected, result, epsilon = 1.0e-5);
#[inline]
#[must_use]
pub fn inner(&self, other: &Self) -> Self {
(self * other + other * self).half()
}
/// Calculates the outer product (also known as the wedge product).
/// See "Foundations of Game Engine Development, Volume 1: Mathematics" by Lengyel
/// Formula 4.89.
///
/// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::Quaternion;
/// let a = Quaternion::new(0.0, 2.0, 3.0, 4.0);
/// let b = Quaternion::new(0.0, 5.0, 2.0, 1.0);
/// let expected = Quaternion::new(0.0, -5.0, 18.0, -11.0);
/// let result = a.outer(&b);
/// assert_relative_eq!(expected, result, epsilon = 1.0e-5);
/// ```
#[inline]
#[must_use]
pub fn outer(&self, other: &Self) -> Self {
#[allow(clippy::eq_op)]
(self * other - other * self).half()
}
    /// Calculates the projection of `self` onto `other` (also known as the parallel).
    /// See "Foundations of Game Engine Development, Volume 1: Mathematics" by Lengyel
    /// Formula 4.94.
    ///
    /// Returns `None` when `other` is not invertible (its norm is too small).
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let a = Quaternion::new(0.0, 2.0, 3.0, 4.0);
    /// let b = Quaternion::new(0.0, 5.0, 2.0, 1.0);
    /// let expected = Quaternion::new(0.0, 3.333333333333333, 1.3333333333333333, 0.6666666666666666);
    /// let result = a.project(&b).unwrap();
    /// assert_relative_eq!(expected, result, epsilon = 1.0e-5);
    /// ```
    #[inline]
    #[must_use]
    pub fn project(&self, other: &Self) -> Option<Self>
    where
        T: RealField,
    {
        // proj_b(a) = inner(a, b) * b⁻¹ (Lengyel 4.94).
        self.inner(other).right_div(other)
    }
    /// Calculates the rejection of `self` from `other` (also known as the perpendicular).
    /// See "Foundations of Game Engine Development, Volume 1: Mathematics" by Lengyel
    /// Formula 4.94.
    ///
    /// Returns `None` when `other` is not invertible (its norm is too small).
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let a = Quaternion::new(0.0, 2.0, 3.0, 4.0);
    /// let b = Quaternion::new(0.0, 5.0, 2.0, 1.0);
    /// let expected = Quaternion::new(0.0, -1.3333333333333333, 1.6666666666666665, 3.3333333333333335);
    /// let result = a.reject(&b).unwrap();
    /// assert_relative_eq!(expected, result, epsilon = 1.0e-5);
    /// ```
    #[inline]
    #[must_use]
    pub fn reject(&self, other: &Self) -> Option<Self>
    where
        T: RealField,
    {
        // rej_b(a) = outer(a, b) * b⁻¹ (Lengyel 4.94).
        self.outer(other).right_div(other)
    }
    /// The polar decomposition of this quaternion.
    ///
    /// Returns, from left to right: the quaternion norm, the half rotation angle, the rotation
    /// axis. If the rotation angle is zero, the rotation axis is set to `None`.
    ///
    /// # Example
    /// ```
    /// # use std::f32;
    /// # use nalgebra::{Vector3, Quaternion};
    /// let q = Quaternion::new(0.0, 5.0, 0.0, 0.0);
    /// let (norm, half_ang, axis) = q.polar_decomposition();
    /// assert_eq!(norm, 5.0);
    /// assert_eq!(half_ang, f32::consts::FRAC_PI_2);
    /// assert_eq!(axis, Some(Vector3::x_axis()));
    /// ```
    #[must_use]
    pub fn polar_decomposition(&self) -> (T, T, Option<Unit<Vector3<T>>>)
    where
        T: RealField,
    {
        // Normalize the full quaternion first; `n` is the norm, `q` the unit part.
        if let Some((q, n)) = Unit::try_new_and_get(self.clone(), T::zero()) {
            // The axis comes from the (possibly zero) vector part.
            if let Some(axis) = Unit::try_new(self.vector().clone_owned(), T::zero()) {
                // `UnitQuaternion::angle` returns the full rotation angle; halve it.
                let angle = q.angle() / crate::convert(2.0f64);
                (n, angle, Some(axis))
            } else {
                // Pure-scalar quaternion: no rotation axis.
                (n, T::zero(), None)
            }
        } else {
            // Zero quaternion: everything degenerates to zero.
            (T::zero(), T::zero(), None)
        }
    }
    /// Compute the natural logarithm of a quaternion.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let q = Quaternion::new(2.0, 5.0, 0.0, 0.0);
    /// assert_relative_eq!(q.ln(), Quaternion::new(1.683647, 1.190289, 0.0, 0.0), epsilon = 1.0e-6)
    /// ```
    #[inline]
    #[must_use]
    pub fn ln(&self) -> Self {
        let n = self.norm();
        let v = self.vector();
        let s = self.scalar();
        // ln(q) = (ln|q|, v̂ · acos(s / |q|)): scalar part is the log of the
        // norm, vector part is the unit axis scaled by the half-angle.
        Self::from_parts(n.clone().simd_ln(), v.normalize() * (s / n).simd_acos())
    }
    /// Compute the exponential of a quaternion.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let q = Quaternion::new(1.683647, 1.190289, 0.0, 0.0);
    /// assert_relative_eq!(q.exp(), Quaternion::new(2.0, 5.0, 0.0, 0.0), epsilon = 1.0e-5)
    /// ```
    #[inline]
    #[must_use]
    pub fn exp(&self) -> Self {
        // Delegate with the default epsilon guarding the near-zero vector case.
        self.exp_eps(T::simd_default_epsilon())
    }
    /// Compute the exponential of a quaternion. Returns the identity if the vector part of this quaternion
    /// has a norm smaller than `eps`.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let q = Quaternion::new(1.683647, 1.190289, 0.0, 0.0);
    /// assert_relative_eq!(q.exp_eps(1.0e-6), Quaternion::new(2.0, 5.0, 0.0, 0.0), epsilon = 1.0e-5);
    ///
    /// // Singular case.
    /// let q = Quaternion::new(0.0000001, 0.0, 0.0, 0.0);
    /// assert_eq!(q.exp_eps(1.0e-6), Quaternion::identity());
    /// ```
    #[inline]
    #[must_use]
    pub fn exp_eps(&self, eps: T) -> Self {
        let v = self.vector();
        let nn = v.norm_squared();
        // Compare squared norms to avoid a sqrt in the guard: |v|² ≤ eps².
        let le = nn.clone().simd_le(eps.clone() * eps);
        // Branchless per-SIMD-lane select: identity for singular lanes,
        // exp(w)·(cos|v|, v̂ sin|v|) otherwise.
        le.if_else(Self::identity, || {
            let w_exp = self.scalar().simd_exp();
            let n = nn.simd_sqrt();
            // v * (exp(w) sin(n) / n) == v̂ · exp(w) sin(n).
            let nv = v * (w_exp.clone() * n.clone().simd_sin() / n.clone());
            Self::from_parts(w_exp * n.simd_cos(), nv)
        })
    }
    /// Raise the quaternion to a given floating power.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let q = Quaternion::new(1.0, 2.0, 3.0, 4.0);
    /// assert_relative_eq!(q.powf(1.5), Quaternion::new( -6.2576659, 4.1549037, 6.2323556, 8.3098075), epsilon = 1.0e-6);
    /// ```
    #[inline]
    #[must_use]
    pub fn powf(&self, n: T) -> Self {
        // qⁿ = exp(n · ln(q)).
        (self.ln() * n).exp()
    }
    /// Transforms this quaternion into its 4D vector form (Vector part, Scalar part).
    ///
    /// # Example
    /// ```
    /// # use nalgebra::{Quaternion, Vector4};
    /// let mut q = Quaternion::identity();
    /// *q.as_vector_mut() = Vector4::new(1.0, 2.0, 3.0, 4.0);
    /// assert!(q.i == 1.0 && q.j == 2.0 && q.k == 3.0 && q.w == 4.0);
    /// ```
    #[inline]
    pub fn as_vector_mut(&mut self) -> &mut Vector4<T> {
        // Coordinates are stored as (i, j, k, w) — the scalar part is last.
        &mut self.coords
    }
    /// The mutable vector part `(i, j, k)` of this quaternion.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::{Quaternion, Vector4};
    /// let mut q = Quaternion::identity();
    /// {
    ///     let mut v = q.vector_mut();
    ///     v[0] = 2.0;
    ///     v[1] = 3.0;
    ///     v[2] = 4.0;
    /// }
    /// assert!(q.i == 2.0 && q.j == 3.0 && q.k == 4.0 && q.w == 1.0);
    /// ```
    #[inline]
    pub fn vector_mut(
        &mut self,
    ) -> MatrixSliceMut<'_, T, U3, U1, RStride<T, U4, U1>, CStride<T, U4, U1>> {
        // A view over the first three coordinates (i, j, k); w stays untouched.
        self.coords.fixed_rows_mut::<3>(0)
    }
/// Replaces this quaternion by its conjugate.
///
/// # Example
/// ```
/// # use nalgebra::Quaternion;
/// let mut q = Quaternion::new(1.0, 2.0, 3.0, 4.0);
/// q.conjugate_mut();
/// assert!(q.i == -2.0 && q.j == -3.0 && q.k == -4.0 && q.w == 1.0);
/// ```
#[inline]
pub fn conjugate_mut(&mut self) {
self.coords[0] = -self.coords[0].clone();
self.coords[1] = -self.coords[1].clone();
self.coords[2] = -self.coords[2].clone();
}
    /// Inverts this quaternion in-place if it is not zero.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let mut q = Quaternion::new(1.0f32, 2.0, 3.0, 4.0);
    ///
    /// assert!(q.try_inverse_mut());
    /// assert_relative_eq!(q * Quaternion::new(1.0, 2.0, 3.0, 4.0), Quaternion::identity());
    ///
    /// //Non-invertible case
    /// let mut q = Quaternion::new(0.0f32, 0.0, 0.0, 0.0);
    /// assert!(!q.try_inverse_mut());
    /// ```
    #[inline]
    pub fn try_inverse_mut(&mut self) -> T::SimdBool {
        let norm_squared = self.norm_squared();
        // Per-lane invertibility mask: |q|² must be at least epsilon.
        let ge = norm_squared.clone().simd_ge(T::simd_default_epsilon());
        // Invertible lanes get conj(q)/|q|²; the others keep their old value.
        *self = ge.if_else(|| self.conjugate() / norm_squared, || self.clone());
        ge
    }
    /// Normalizes this quaternion.
    ///
    /// Returns the norm the quaternion had before normalization.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let mut q = Quaternion::new(1.0, 2.0, 3.0, 4.0);
    /// q.normalize_mut();
    /// assert_relative_eq!(q.norm(), 1.0);
    /// ```
    #[inline]
    pub fn normalize_mut(&mut self) -> T {
        // Delegates to the 4D coordinate vector's in-place normalization.
        self.coords.normalize_mut()
    }
    /// Calculates square of a quaternion.
    #[inline]
    #[must_use]
    pub fn squared(&self) -> Self {
        // Quaternion product of `self` with itself (not an element-wise square).
        self * self
    }
    /// Divides quaternion into two.
    #[inline]
    #[must_use]
    pub fn half(&self) -> Self {
        // Component-wise division by 2, with the constant converted into T.
        self / crate::convert(2.0f64)
    }
    /// Calculates square root.
    #[inline]
    #[must_use]
    pub fn sqrt(&self) -> Self {
        // √q = q^0.5, computed through the exp/ln-based power function.
        self.powf(crate::convert(0.5))
    }
    /// Check if the quaternion is pure.
    ///
    /// A quaternion is pure if it has no real part (`self.w == 0.0`).
    #[inline]
    #[must_use]
    pub fn is_pure(&self) -> bool {
        // Exact zero test on the scalar part — no epsilon tolerance.
        self.w.is_zero()
    }
    /// Convert quaternion to pure quaternion.
    #[inline]
    #[must_use]
    pub fn pure(&self) -> Self {
        // Keeps the (i, j, k) part and zeroes the scalar part.
        Self::from_imag(self.imag())
    }
/// Left quaternionic division.
///
/// Calculates B<sup>-1</sup> * A where A = self, B = other.
#[inline]
#[must_use]
pub fn left_div(&self, other: &Self) -> Option<Self>
where
T: RealField,
{
other.try_inverse().map(|inv| inv * self)
}
/// Right quaternionic division.
///
/// Calculates A * B<sup>-1</sup> where A = self, B = other.
///
/// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::Quaternion;
/// let a = Quaternion::new(0.0, 1.0, 2.0, 3.0);
/// let b = Quaternion::new(0.0, 5.0, 2.0, 1.0);
/// let result = a.right_div(&b).unwrap();
/// let expected = Quaternion::new(0.4, 0.13333333333333336, -0.4666666666666667, 0.26666666666666666);
/// assert_relative_eq!(expected, result, epsilon = 1.0e-7);
/// ```
#[inline]
#[must_use]
pub fn right_div(&self, other: &Self) -> Option<Self>
where
T: RealField,
{
other.try_inverse().map(|inv| self * inv)
}
    /// Calculates the quaternionic cosinus.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let input = Quaternion::new(1.0, 2.0, 3.0, 4.0);
    /// let expected = Quaternion::new(58.93364616794395, -34.086183690465596, -51.1292755356984, -68.17236738093119);
    /// let result = input.cos();
    /// assert_relative_eq!(expected, result, epsilon = 1.0e-7);
    /// ```
    #[inline]
    #[must_use]
    pub fn cos(&self) -> Self {
        // z = |v|: magnitude of the vector part.
        let z = self.imag().magnitude();
        // sinhc(z) = sinh(z)/z handles z → 0 without a division by zero.
        let w = -self.w.clone().simd_sin() * z.clone().simd_sinhc();
        Self::from_parts(self.w.clone().simd_cos() * z.simd_cosh(), self.imag() * w)
    }
    /// Calculates the quaternionic arccosinus.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let input = Quaternion::new(1.0, 2.0, 3.0, 4.0);
    /// let result = input.cos().acos();
    /// assert_relative_eq!(input, result, epsilon = 1.0e-7);
    /// ```
    #[inline]
    #[must_use]
    pub fn acos(&self) -> Self {
        // u: the pure unit quaternion along the vector part (plays the role of i).
        let u = Self::from_imag(self.imag().normalize());
        let identity = Self::identity();
        // acos(q) = -u · ln(q + √(q² - 1)), mirroring the complex identity.
        let z = (self + (self.squared() - identity).sqrt()).ln();
        -(u * z)
    }
    /// Calculates the quaternionic sinus.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let input = Quaternion::new(1.0, 2.0, 3.0, 4.0);
    /// let expected = Quaternion::new(91.78371578403467, 21.886486853029176, 32.82973027954377, 43.77297370605835);
    /// let result = input.sin();
    /// assert_relative_eq!(expected, result, epsilon = 1.0e-7);
    /// ```
    #[inline]
    #[must_use]
    pub fn sin(&self) -> Self {
        // z = |v|: magnitude of the vector part.
        let z = self.imag().magnitude();
        // sinhc(z) = sinh(z)/z handles z → 0 without a division by zero.
        let w = self.w.clone().simd_cos() * z.clone().simd_sinhc();
        Self::from_parts(self.w.clone().simd_sin() * z.simd_cosh(), self.imag() * w)
    }
    /// Calculates the quaternionic arcsinus.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let input = Quaternion::new(1.0, 2.0, 3.0, 4.0);
    /// let result = input.sin().asin();
    /// assert_relative_eq!(input, result, epsilon = 1.0e-7);
    /// ```
    #[inline]
    #[must_use]
    pub fn asin(&self) -> Self {
        // u: the pure unit quaternion along the vector part (plays the role of i).
        let u = Self::from_imag(self.imag().normalize());
        let identity = Self::identity();
        // asin(q) = -u · ln(u·q + √(1 - q²)), mirroring the complex identity.
        let z = ((u.clone() * self) + (identity - self.squared()).sqrt()).ln();
        -(u * z)
    }
    /// Calculates the quaternionic tangent.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let input = Quaternion::new(1.0, 2.0, 3.0, 4.0);
    /// let expected = Quaternion::new(0.00003821631725009489, 0.3713971716439371, 0.5570957574659058, 0.7427943432878743);
    /// let result = input.tan();
    /// assert_relative_eq!(expected, result, epsilon = 1.0e-7);
    /// ```
    #[inline]
    #[must_use]
    pub fn tan(&self) -> Self
    where
        T: RealField,
    {
        // tan(q) = sin(q) · cos(q)⁻¹; the unwrap relies on cos(q) being
        // invertible — NOTE(review): this panics when cos(q) is ~zero.
        self.sin().right_div(&self.cos()).unwrap()
    }
    /// Calculates the quaternionic arctangent.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let input = Quaternion::new(1.0, 2.0, 3.0, 4.0);
    /// let result = input.tan().atan();
    /// assert_relative_eq!(input, result, epsilon = 1.0e-7);
    /// ```
    #[inline]
    #[must_use]
    pub fn atan(&self) -> Self
    where
        T: RealField,
    {
        // u: the pure unit quaternion along the vector part (plays the role of i).
        let u = Self::from_imag(self.imag().normalize());
        // atan(q) = (u/2) · ln((u + q)(u - q)⁻¹), mirroring the complex identity.
        let num = u.clone() + self;
        let den = u.clone() - self;
        // NOTE(review): unwrap panics if (u - q) is not invertible.
        let fr = num.right_div(&den).unwrap();
        let ln = fr.ln();
        (u.half()) * ln
    }
    /// Calculates the hyperbolic quaternionic sinus.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let input = Quaternion::new(1.0, 2.0, 3.0, 4.0);
    /// let expected = Quaternion::new(0.7323376060463428, -0.4482074499805421, -0.6723111749708133, -0.8964148999610843);
    /// let result = input.sinh();
    /// assert_relative_eq!(expected, result, epsilon = 1.0e-7);
    /// ```
    #[inline]
    #[must_use]
    pub fn sinh(&self) -> Self {
        // sinh(q) = (e^q - e^(-q)) / 2.
        (self.exp() - (-self).exp()).half()
    }
    /// Calculates the hyperbolic quaternionic arcsinus.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let input = Quaternion::new(1.0, 2.0, 3.0, 4.0);
    /// let expected = Quaternion::new(2.385889902585242, 0.514052600662788, 0.7710789009941821, 1.028105201325576);
    /// let result = input.asinh();
    /// assert_relative_eq!(expected, result, epsilon = 1.0e-7);
    /// ```
    #[inline]
    #[must_use]
    pub fn asinh(&self) -> Self {
        let identity = Self::identity();
        // asinh(q) = ln(q + √(1 + q²)).
        (self + (identity + self.squared()).sqrt()).ln()
    }
    /// Calculates the hyperbolic quaternionic cosinus.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let input = Quaternion::new(1.0, 2.0, 3.0, 4.0);
    /// let expected = Quaternion::new(0.9615851176369566, -0.3413521745610167, -0.5120282618415251, -0.6827043491220334);
    /// let result = input.cosh();
    /// assert_relative_eq!(expected, result, epsilon = 1.0e-7);
    /// ```
    #[inline]
    #[must_use]
    pub fn cosh(&self) -> Self {
        // cosh(q) = (e^q + e^(-q)) / 2.
        (self.exp() + (-self).exp()).half()
    }
    /// Calculates the hyperbolic quaternionic arccosinus.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let input = Quaternion::new(1.0, 2.0, 3.0, 4.0);
    /// let expected = Quaternion::new(2.4014472020074007, 0.5162761016176176, 0.7744141524264264, 1.0325522032352352);
    /// let result = input.acosh();
    /// assert_relative_eq!(expected, result, epsilon = 1.0e-7);
    /// ```
    #[inline]
    #[must_use]
    pub fn acosh(&self) -> Self {
        let identity = Self::identity();
        // acosh(q) = ln(q + √(q + 1)·√(q - 1)) — the split-root form of
        // ln(q + √(q² - 1)) used for complex-like branch behavior.
        (self + (self + identity.clone()).sqrt() * (self - identity).sqrt()).ln()
    }
    /// Calculates the hyperbolic quaternionic tangent.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let input = Quaternion::new(1.0, 2.0, 3.0, 4.0);
    /// let expected = Quaternion::new(1.0248695360556623, -0.10229568178876419, -0.1534435226831464, -0.20459136357752844);
    /// let result = input.tanh();
    /// assert_relative_eq!(expected, result, epsilon = 1.0e-7);
    /// ```
    #[inline]
    #[must_use]
    pub fn tanh(&self) -> Self
    where
        T: RealField,
    {
        // tanh(q) = sinh(q) · cosh(q)⁻¹; the unwrap relies on cosh(q) being
        // invertible — NOTE(review): this panics when cosh(q) is ~zero.
        self.sinh().right_div(&self.cosh()).unwrap()
    }
    /// Calculates the hyperbolic quaternionic arctangent.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::Quaternion;
    /// let input = Quaternion::new(1.0, 2.0, 3.0, 4.0);
    /// let expected = Quaternion::new(0.03230293287000163, 0.5173453683196951, 0.7760180524795426, 1.0346907366393903);
    /// let result = input.atanh();
    /// assert_relative_eq!(expected, result, epsilon = 1.0e-7);
    /// ```
    #[inline]
    #[must_use]
    pub fn atanh(&self) -> Self {
        let identity = Self::identity();
        // atanh(q) = (ln(1 + q) - ln(1 - q)) / 2.
        ((identity.clone() + self).ln() - (identity - self).ln()).half()
    }
}
impl<T: RealField + AbsDiffEq<Epsilon = T>> AbsDiffEq for Quaternion<T> {
    type Epsilon = T;
    #[inline]
    fn default_epsilon() -> Self::Epsilon {
        T::default_epsilon()
    }
    /// Two quaternions are approximately equal either component-wise, or if
    /// one is the component-wise negation of the other (q and -q encode the
    /// same rotation).
    #[inline]
    fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
        self.as_vector().abs_diff_eq(other.as_vector(), epsilon.clone()) ||
        // Account for the double-covering of S², i.e. q = -q
        self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.abs_diff_eq(&-b.clone(), epsilon.clone()))
    }
}
impl<T: RealField + RelativeEq<Epsilon = T>> RelativeEq for Quaternion<T> {
    #[inline]
    fn default_max_relative() -> Self::Epsilon {
        T::default_max_relative()
    }
    /// Relative comparison with the same q ≡ -q equivalence used by
    /// `abs_diff_eq`: equal component-wise, or equal to the negation.
    #[inline]
    fn relative_eq(
        &self,
        other: &Self,
        epsilon: Self::Epsilon,
        max_relative: Self::Epsilon,
    ) -> bool {
        self.as_vector().relative_eq(other.as_vector(), epsilon.clone(), max_relative.clone()) ||
        // Account for the double-covering of S², i.e. q = -q
        self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.relative_eq(&-b.clone(), epsilon.clone(), max_relative.clone()))
    }
}
impl<T: RealField + UlpsEq<Epsilon = T>> UlpsEq for Quaternion<T> {
    #[inline]
    fn default_max_ulps() -> u32 {
        T::default_max_ulps()
    }
    /// ULPs-based comparison with the same q ≡ -q equivalence used by
    /// `abs_diff_eq`: equal component-wise, or equal to the negation.
    #[inline]
    fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
        self.as_vector().ulps_eq(other.as_vector(), epsilon.clone(), max_ulps) ||
        // Account for the double-covering of S², i.e. q = -q.
        self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.ulps_eq(&-b.clone(), epsilon.clone(), max_ulps))
    }
}
impl<T: RealField + fmt::Display> fmt::Display for Quaternion<T> {
    /// Formats as `Quaternion w − (i, j, k)`: coordinates are stored
    /// (i, j, k, w), so index 3 is the scalar part.
    /// NOTE(review): the separator is U+2212 (minus sign), not an ASCII
    /// hyphen — presumably intentional; confirm before "fixing".
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "Quaternion {} − ({}, {}, {})",
            self[3], self[0], self[1], self[2]
        )
    }
}
/// A unit quaternion. May be used to represent a rotation.
pub type UnitQuaternion<T> = Unit<Quaternion<T>>;
// CUDA interop: a `UnitQuaternion` is plain old data whenever its components
// are, so it can be copied to the device byte-for-byte.
#[cfg(all(not(target_os = "cuda"), feature = "cuda"))]
unsafe impl<T: cust::memory::DeviceCopy> cust::memory::DeviceCopy for UnitQuaternion<T> {}
impl<T: Scalar + ClosedNeg + PartialEq> PartialEq for UnitQuaternion<T> {
    /// Exact equality up to sign: `q` and `-q` represent the same rotation,
    /// so both component-wise equality and component-wise negation count.
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        self.coords == rhs.coords ||
        // Account for the double-covering of S², i.e. q = -q
        self.coords.iter().zip(rhs.coords.iter()).all(|(a, b)| *a == -b.clone())
    }
}
// `eq` above is a genuine equivalence relation when T's equality is, so the
// marker trait is sound.
impl<T: Scalar + ClosedNeg + Eq> Eq for UnitQuaternion<T> {}
// Norm support for `Quaternion`, enabling `Unit<Quaternion<T>>`; everything
// delegates to the underlying 4D coordinate vector.
impl<T: SimdRealField> Normed for Quaternion<T> {
    type Norm = T::SimdRealField;
    #[inline]
    fn norm(&self) -> T::SimdRealField {
        self.coords.norm()
    }
    #[inline]
    fn norm_squared(&self) -> T::SimdRealField {
        self.coords.norm_squared()
    }
    #[inline]
    fn scale_mut(&mut self, n: Self::Norm) {
        self.coords.scale_mut(n)
    }
    #[inline]
    fn unscale_mut(&mut self, n: Self::Norm) {
        self.coords.unscale_mut(n)
    }
}
impl<T: SimdRealField> UnitQuaternion<T>
where
T::Element: SimdRealField,
{
    /// The rotation angle in [0; pi] of this unit quaternion.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::{Unit, UnitQuaternion, Vector3};
    /// let axis = Unit::new_normalize(Vector3::new(1.0, 2.0, 3.0));
    /// let rot = UnitQuaternion::from_axis_angle(&axis, 1.78);
    /// assert_eq!(rot.angle(), 1.78);
    /// ```
    #[inline]
    #[must_use]
    pub fn angle(&self) -> T {
        // |w| folds q and -q together; atan2(|v|, |w|) recovers the
        // half-angle, hence the final multiplication by 2.
        let w = self.quaternion().scalar().simd_abs();
        self.quaternion().imag().norm().simd_atan2(w) * crate::convert(2.0f64)
    }
    /// The underlying quaternion.
    ///
    /// Same as `self.as_ref()`.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::{UnitQuaternion, Quaternion};
    /// let axis = UnitQuaternion::identity();
    /// assert_eq!(*axis.quaternion(), Quaternion::new(1.0, 0.0, 0.0, 0.0));
    /// ```
    #[inline]
    #[must_use]
    pub fn quaternion(&self) -> &Quaternion<T> {
        // Borrow the wrapped quaternion; no normalization happens here.
        self.as_ref()
    }
    /// Compute the conjugate of this unit quaternion.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::{Unit, UnitQuaternion, Vector3};
    /// let axis = Unit::new_normalize(Vector3::new(1.0, 2.0, 3.0));
    /// let rot = UnitQuaternion::from_axis_angle(&axis, 1.78);
    /// let conj = rot.conjugate();
    /// assert_eq!(conj, UnitQuaternion::from_axis_angle(&-axis, 1.78));
    /// ```
    #[inline]
    #[must_use = "Did you mean to use conjugate_mut()?"]
    pub fn conjugate(&self) -> Self {
        // Conjugation preserves the unit norm, so re-wrapping unchecked is safe.
        Self::new_unchecked(self.as_ref().conjugate())
    }
    /// Inverts this quaternion if it is not zero.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::{Unit, UnitQuaternion, Vector3};
    /// let axis = Unit::new_normalize(Vector3::new(1.0, 2.0, 3.0));
    /// let rot = UnitQuaternion::from_axis_angle(&axis, 1.78);
    /// let inv = rot.inverse();
    /// assert_eq!(rot * inv, UnitQuaternion::identity());
    /// assert_eq!(inv * rot, UnitQuaternion::identity());
    /// ```
    #[inline]
    #[must_use = "Did you mean to use inverse_mut()?"]
    pub fn inverse(&self) -> Self {
        // For a unit quaternion the inverse equals the conjugate (|q| == 1).
        self.conjugate()
    }
/// The rotation angle needed to make `self` and `other` coincide.
///
/// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{UnitQuaternion, Vector3};
/// let rot1 = UnitQuaternion::from_axis_angle(&Vector3::y_axis(), 1.0);
/// let rot2 = UnitQuaternion::from_axis_angle(&Vector3::x_axis(), 0.1);
/// assert_relative_eq!(rot1.angle_to(&rot2), 1.0045657, epsilon = 1.0e-6);
/// ```
#[inline]
#[must_use]
pub fn angle_to(&self, other: &Self) -> T {
let delta = self.rotation_to(other);
delta.angle()
}
    /// The unit quaternion needed to make `self` and `other` coincide.
    ///
    /// The result is such that: `self.rotation_to(other) * self == other`.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::{UnitQuaternion, Vector3};
    /// let rot1 = UnitQuaternion::from_axis_angle(&Vector3::y_axis(), 1.0);
    /// let rot2 = UnitQuaternion::from_axis_angle(&Vector3::x_axis(), 0.1);
    /// let rot_to = rot1.rotation_to(&rot2);
    /// assert_relative_eq!(rot_to * rot1, rot2, epsilon = 1.0e-6);
    /// ```
    #[inline]
    #[must_use]
    pub fn rotation_to(&self, other: &Self) -> Self {
        // delta = other * self⁻¹, expressed via the `Div` operator.
        other / self
    }
    /// Linear interpolation between two unit quaternions.
    ///
    /// The result is not normalized.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::{UnitQuaternion, Quaternion};
    /// let q1 = UnitQuaternion::new_normalize(Quaternion::new(1.0, 0.0, 0.0, 0.0));
    /// let q2 = UnitQuaternion::new_normalize(Quaternion::new(0.0, 1.0, 0.0, 0.0));
    /// assert_eq!(q1.lerp(&q2, 0.1), Quaternion::new(0.9, 0.1, 0.0, 0.0));
    /// ```
    #[inline]
    #[must_use]
    pub fn lerp(&self, other: &Self, t: T) -> Quaternion<T> {
        // Plain component-wise lerp on the wrapped quaternions; the result
        // generally leaves the unit sphere, hence the `Quaternion` return type.
        self.as_ref().lerp(other.as_ref(), t)
    }
/// Normalized linear interpolation between two unit quaternions.
///
/// This is the same as `self.lerp` except that the result is normalized.
///
/// # Example
/// ```
/// # use nalgebra::{UnitQuaternion, Quaternion};
/// let q1 = UnitQuaternion::new_normalize(Quaternion::new(1.0, 0.0, 0.0, 0.0));
/// let q2 = UnitQuaternion::new_normalize(Quaternion::new(0.0, 1.0, 0.0, 0.0));
/// assert_eq!(q1.nlerp(&q2, 0.1), UnitQuaternion::new_normalize(Quaternion::new(0.9, 0.1, 0.0, 0.0)));
/// ```
#[inline]
#[must_use]
pub fn nlerp(&self, other: &Self, t: T) -> Self {
let mut res = self.lerp(other, t);
let _ = res.normalize_mut();
Self::new_unchecked(res)
}
    /// Spherical linear interpolation between two unit quaternions.
    ///
    /// Panics if the angle between both quaternion is 180 degrees (in which case the interpolation
    /// is not well-defined). Use `.try_slerp` instead to avoid the panic.
    ///
    /// # Examples:
    ///
    /// ```
    /// # use nalgebra::geometry::UnitQuaternion;
    ///
    /// let q1 = UnitQuaternion::from_euler_angles(std::f32::consts::FRAC_PI_4, 0.0, 0.0);
    /// let q2 = UnitQuaternion::from_euler_angles(-std::f32::consts::PI, 0.0, 0.0);
    ///
    /// let q = q1.slerp(&q2, 1.0 / 3.0);
    ///
    /// assert_eq!(q.euler_angles(), (std::f32::consts::FRAC_PI_2, 0.0, 0.0));
    /// ```
    #[inline]
    #[must_use]
    pub fn slerp(&self, other: &Self, t: T) -> Self
    where
        T: RealField,
    {
        // Delegate to the fallible version; `None` (antipodal inputs) becomes
        // the documented panic.
        self.try_slerp(other, t, T::default_epsilon())
            .expect("Quaternion slerp: ambiguous configuration.")
    }
    /// Computes the spherical linear interpolation between two unit quaternions or returns `None`
    /// if both quaternions are approximately 180 degrees apart (in which case the interpolation is
    /// not well-defined).
    ///
    /// # Arguments
    /// * `self`: the first quaternion to interpolate from.
    /// * `other`: the second quaternion to interpolate toward.
    /// * `t`: the interpolation parameter. Should be between 0 and 1.
    /// * `epsilon`: the value below which the sinus of the angle separating both quaternion
    /// must be to return `None`.
    #[inline]
    #[must_use]
    pub fn try_slerp(&self, other: &Self, t: T, epsilon: T) -> Option<Self>
    where
        T: RealField,
    {
        // A negative dot product means the two quaternions are on opposite
        // hemispheres; negate `other` so the interpolation takes the short
        // path (q and -q encode the same rotation).
        let coords = if self.coords.dot(&other.coords) < T::zero() {
            Unit::new_unchecked(self.coords.clone()).try_slerp(
                &Unit::new_unchecked(-other.coords.clone()),
                t,
                epsilon,
            )
        } else {
            Unit::new_unchecked(self.coords.clone()).try_slerp(
                &Unit::new_unchecked(other.coords.clone()),
                t,
                epsilon,
            )
        };
        // Re-wrap the interpolated 4D coordinates as a unit quaternion.
        coords.map(|q| Unit::new_unchecked(Quaternion::from(q.into_inner())))
    }
    /// Compute the conjugate of this unit quaternion in-place.
    #[inline]
    pub fn conjugate_mut(&mut self) {
        // Conjugation preserves the unit norm, so mutating the inner
        // quaternion directly cannot break the `Unit` invariant.
        self.as_mut_unchecked().conjugate_mut()
    }
    /// Inverts this quaternion if it is not zero.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::{UnitQuaternion, Vector3, Unit};
    /// let axisangle = Vector3::new(0.1, 0.2, 0.3);
    /// let mut rot = UnitQuaternion::new(axisangle);
    /// rot.inverse_mut();
    /// assert_relative_eq!(rot * UnitQuaternion::new(axisangle), UnitQuaternion::identity());
    /// assert_relative_eq!(UnitQuaternion::new(axisangle) * rot, UnitQuaternion::identity());
    /// ```
    #[inline]
    pub fn inverse_mut(&mut self) {
        // For a unit quaternion the inverse equals the conjugate (|q| == 1).
        self.as_mut_unchecked().conjugate_mut()
    }
    /// The rotation axis of this unit quaternion or `None` if the rotation is zero.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::{UnitQuaternion, Vector3, Unit};
    /// let axis = Unit::new_normalize(Vector3::new(1.0, 2.0, 3.0));
    /// let angle = 1.2;
    /// let rot = UnitQuaternion::from_axis_angle(&axis, angle);
    /// assert_eq!(rot.axis(), Some(axis));
    ///
    /// // Case with a zero angle.
    /// let rot = UnitQuaternion::from_axis_angle(&axis, 0.0);
    /// assert!(rot.axis().is_none());
    /// ```
    #[inline]
    #[must_use]
    pub fn axis(&self) -> Option<Unit<Vector3<T>>>
    where
        T: RealField,
    {
        // Flip the vector part when w < 0 so that the returned axis matches
        // the canonical angle-in-[0, pi] convention used by `angle()`.
        let v = if self.quaternion().scalar() >= T::zero() {
            self.as_ref().vector().clone_owned()
        } else {
            -self.as_ref().vector()
        };
        // A (near-)zero vector part means a zero rotation: no axis.
        Unit::try_new(v, T::zero())
    }
/// The rotation axis of this unit quaternion multiplied by the rotation angle.
///
/// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{UnitQuaternion, Vector3, Unit};
/// let axisangle = Vector3::new(0.1, 0.2, 0.3);
/// let rot = UnitQuaternion::new(axisangle);
/// assert_relative_eq!(rot.scaled_axis(), axisangle, epsilon = 1.0e-6);
/// ```
#[inline]
#[must_use]
pub fn scaled_axis(&self) -> Vector3<T>
where
T: RealField,
{
if let Some(axis) = self.axis() {
axis.into_inner() * self.angle()
} else {
Vector3::zero()
}
}
    /// The rotation axis and angle in ]0, pi] of this unit quaternion.
    ///
    /// Returns `None` if the angle is zero.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::{UnitQuaternion, Vector3, Unit};
    /// let axis = Unit::new_normalize(Vector3::new(1.0, 2.0, 3.0));
    /// let angle = 1.2;
    /// let rot = UnitQuaternion::from_axis_angle(&axis, angle);
    /// assert_eq!(rot.axis_angle(), Some((axis, angle)));
    ///
    /// // Case with a zero angle.
    /// let rot = UnitQuaternion::from_axis_angle(&axis, 0.0);
    /// assert!(rot.axis_angle().is_none());
    /// ```
    #[inline]
    #[must_use]
    pub fn axis_angle(&self) -> Option<(Unit<Vector3<T>>, T)>
    where
        T: RealField,
    {
        // `axis()` already returns None for a zero rotation, so the angle is
        // only computed when an axis exists.
        self.axis().map(|axis| (axis, self.angle()))
    }
    /// Compute the exponential of a quaternion.
    ///
    /// Note that this function yields a `Quaternion<T>` because it loses the unit property.
    #[inline]
    #[must_use]
    pub fn exp(&self) -> Quaternion<T> {
        // Delegates to the plain-quaternion exponential on the wrapped value.
        self.as_ref().exp()
    }
/// Compute the natural logarithm of a quaternion.
///
/// Note that this function yields a `Quaternion<T>` because it loses the unit property.
/// The vector part of the return value corresponds to the axis-angle representation (divided
/// by 2.0) of this unit quaternion.
///
/// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{Vector3, UnitQuaternion};
/// let axisangle = Vector3::new(0.1, 0.2, 0.3);
/// let q = UnitQuaternion::new(axisangle);
/// assert_relative_eq!(q.ln().vector().into_owned(), axisangle, epsilon = 1.0e-6);
/// ```
#[inline]
#[must_use]
pub fn ln(&self) -> Quaternion<T>
where
T: RealField,
{
if let Some(v) = self.axis() {
Quaternion::from_imag(v.into_inner() * self.angle())
} else {
Quaternion::zero()
}
}
/// Raise the quaternion to a given floating power.
///
/// This returns the unit quaternion that identifies a rotation with axis `self.axis()` and
/// angle `self.angle() × n`.
///
/// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{UnitQuaternion, Vector3, Unit};
/// let axis = Unit::new_normalize(Vector3::new(1.0, 2.0, 3.0));
/// let angle = 1.2;
/// let rot = UnitQuaternion::from_axis_angle(&axis, angle);
/// let pow = rot.powf(2.0);
/// assert_relative_eq!(pow.axis().unwrap(), axis, epsilon = 1.0e-6);
/// assert_eq!(pow.angle(), 2.4);
/// ```
#[inline]
#[must_use]
pub fn powf(&self, n: T) -> Self
where
T: RealField,
{
if let Some(v) = self.axis() {
Self::from_axis_angle(&v, self.angle() * n)
} else {
Self::identity()
}
}
    /// Builds a rotation matrix from this unit quaternion.
    ///
    /// # Example
    ///
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use std::f32;
    /// # use nalgebra::{UnitQuaternion, Vector3, Matrix3};
    /// let q = UnitQuaternion::from_axis_angle(&Vector3::z_axis(), f32::consts::FRAC_PI_6);
    /// let rot = q.to_rotation_matrix();
    /// let expected = Matrix3::new(0.8660254, -0.5, 0.0,
    ///                             0.5,       0.8660254, 0.0,
    ///                             0.0,       0.0,       1.0);
    ///
    /// assert_relative_eq!(*rot.matrix(), expected, epsilon = 1.0e-6);
    /// ```
    #[inline]
    #[must_use]
    pub fn to_rotation_matrix(self) -> Rotation<T, 3> {
        // Coordinates are stored (i, j, k, w): the scalar is at index 3.
        let i = self.as_ref()[0].clone();
        let j = self.as_ref()[1].clone();
        let k = self.as_ref()[2].clone();
        let w = self.as_ref()[3].clone();
        // Precompute the squared terms and the doubled cross terms of the
        // standard quaternion-to-matrix formula.
        let ww = w.clone() * w.clone();
        let ii = i.clone() * i.clone();
        let jj = j.clone() * j.clone();
        let kk = k.clone() * k.clone();
        let ij = i.clone() * j.clone() * crate::convert(2.0f64);
        let wk = w.clone() * k.clone() * crate::convert(2.0f64);
        let wj = w.clone() * j.clone() * crate::convert(2.0f64);
        let ik = i.clone() * k.clone() * crate::convert(2.0f64);
        let jk = j * k * crate::convert(2.0f64);
        let wi = w * i * crate::convert(2.0f64);
        // Row-major entries; assumes |q| == 1 (guaranteed by the Unit wrapper),
        // so ww + ii + jj + kk == 1 and no renormalization is needed.
        Rotation::from_matrix_unchecked(Matrix3::new(
            ww.clone() + ii.clone() - jj.clone() - kk.clone(),
            ij.clone() - wk.clone(),
            wj.clone() + ik.clone(),
            wk + ij,
            ww.clone() - ii.clone() + jj.clone() - kk.clone(),
            jk.clone() - wi.clone(),
            ik - wj,
            wi + jk,
            ww - ii - jj + kk,
        ))
    }
    /// Converts this unit quaternion into its equivalent Euler angles.
    ///
    /// The angles are produced in the form (roll, pitch, yaw).
    #[inline]
    #[deprecated(note = "This is renamed to use `.euler_angles()`.")]
    pub fn to_euler_angles(self) -> (T, T, T)
    where
        T: RealField,
    {
        // Deprecated alias kept for backward compatibility.
        self.euler_angles()
    }
    /// Retrieves the euler angles corresponding to this unit quaternion.
    ///
    /// The angles are produced in the form (roll, pitch, yaw).
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use nalgebra::UnitQuaternion;
    /// let rot = UnitQuaternion::from_euler_angles(0.1, 0.2, 0.3);
    /// let euler = rot.euler_angles();
    /// assert_relative_eq!(euler.0, 0.1, epsilon = 1.0e-6);
    /// assert_relative_eq!(euler.1, 0.2, epsilon = 1.0e-6);
    /// assert_relative_eq!(euler.2, 0.3, epsilon = 1.0e-6);
    /// ```
    #[inline]
    #[must_use]
    pub fn euler_angles(&self) -> (T, T, T)
    where
        T: RealField,
    {
        // Converts through the rotation-matrix representation, which owns
        // the Euler-angle extraction logic.
        self.clone().to_rotation_matrix().euler_angles()
    }
    /// Converts this unit quaternion into its equivalent homogeneous transformation matrix.
    ///
    /// # Example
    ///
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use std::f32;
    /// # use nalgebra::{UnitQuaternion, Vector3, Matrix4};
    /// let rot = UnitQuaternion::from_axis_angle(&Vector3::z_axis(), f32::consts::FRAC_PI_6);
    /// let expected = Matrix4::new(0.8660254, -0.5,      0.0, 0.0,
    ///                             0.5,       0.8660254, 0.0, 0.0,
    ///                             0.0,       0.0,       1.0, 0.0,
    ///                             0.0,       0.0,       0.0, 1.0);
    ///
    /// assert_relative_eq!(rot.to_homogeneous(), expected, epsilon = 1.0e-6);
    /// ```
    #[inline]
    #[must_use]
    pub fn to_homogeneous(self) -> Matrix4<T> {
        // 3x3 rotation embedded in a 4x4 matrix with zero translation.
        self.to_rotation_matrix().to_homogeneous()
    }
    /// Rotate a point by this unit quaternion.
    ///
    /// This is the same as the multiplication `self * pt`.
    ///
    /// # Example
    ///
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use std::f32;
    /// # use nalgebra::{UnitQuaternion, Vector3, Point3};
    /// let rot = UnitQuaternion::from_axis_angle(&Vector3::y_axis(), f32::consts::FRAC_PI_2);
    /// let transformed_point = rot.transform_point(&Point3::new(1.0, 2.0, 3.0));
    ///
    /// assert_relative_eq!(transformed_point, Point3::new(3.0, 2.0, -1.0), epsilon = 1.0e-6);
    /// ```
    #[inline]
    #[must_use]
    pub fn transform_point(&self, pt: &Point3<T>) -> Point3<T> {
        // Named alias for the `Mul<&Point3>` operator impl.
        self * pt
    }
    /// Rotate a vector by this unit quaternion.
    ///
    /// This is the same as the multiplication `self * v`.
    ///
    /// # Example
    ///
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use std::f32;
    /// # use nalgebra::{UnitQuaternion, Vector3};
    /// let rot = UnitQuaternion::from_axis_angle(&Vector3::y_axis(), f32::consts::FRAC_PI_2);
    /// let transformed_vector = rot.transform_vector(&Vector3::new(1.0, 2.0, 3.0));
    ///
    /// assert_relative_eq!(transformed_vector, Vector3::new(3.0, 2.0, -1.0), epsilon = 1.0e-6);
    /// ```
    #[inline]
    #[must_use]
    pub fn transform_vector(&self, v: &Vector3<T>) -> Vector3<T> {
        // Named alias for the `Mul<&Vector3>` operator impl.
        self * v
    }
/// Rotate a point by the inverse of this unit quaternion. This may be
/// cheaper than inverting the unit quaternion and transforming the
/// point.
///
/// # Example
///
/// ```
/// # #[macro_use] extern crate approx;
/// # use std::f32;
/// # use nalgebra::{UnitQuaternion, Vector3, Point3};
/// let rot = UnitQuaternion::from_axis_angle(&Vector3::y_axis(), f32::consts::FRAC_PI_2);
/// let transformed_point = rot.inverse_transform_point(&Point3::new(1.0, 2.0, 3.0));
///
/// assert_relative_eq!(transformed_point, Point3::new(-3.0, 2.0, 1.0), epsilon = 1.0e-6);
/// ```
#[inline]
#[must_use]
pub fn inverse_transform_point(&self, pt: &Point3<T>) -> Point3<T> {
// TODO: would it be useful performancewise not to call inverse explicitly (i-e. implement
// the inverse transformation explicitly here) ?
self.inverse() * pt
}
/// Rotate a vector by the inverse of this unit quaternion. This may be
/// cheaper than inverting the unit quaternion and transforming the
/// vector.
///
/// # Example
///
/// ```
/// # #[macro_use] extern crate approx;
/// # use std::f32;
/// # use nalgebra::{UnitQuaternion, Vector3};
/// let rot = UnitQuaternion::from_axis_angle(&Vector3::y_axis(), f32::consts::FRAC_PI_2);
/// let transformed_vector = rot.inverse_transform_vector(&Vector3::new(1.0, 2.0, 3.0));
///
/// assert_relative_eq!(transformed_vector, Vector3::new(-3.0, 2.0, 1.0), epsilon = 1.0e-6);
/// ```
#[inline]
#[must_use]
pub fn inverse_transform_vector(&self, v: &Vector3<T>) -> Vector3<T> {
self.inverse() * v
}
    /// Rotate a unit vector by the inverse of this unit quaternion. This may be
    /// cheaper than inverting the unit quaternion and transforming the
    /// vector, and the result stays wrapped in `Unit`.
    ///
    /// # Example
    ///
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # use std::f32;
    /// # use nalgebra::{UnitQuaternion, Vector3};
    /// let rot = UnitQuaternion::from_axis_angle(&Vector3::z_axis(), f32::consts::FRAC_PI_2);
    /// let transformed_vector = rot.inverse_transform_unit_vector(&Vector3::x_axis());
    ///
    /// assert_relative_eq!(transformed_vector, -Vector3::y_axis(), epsilon = 1.0e-6);
    /// ```
    #[inline]
    #[must_use]
    pub fn inverse_transform_unit_vector(&self, v: &Unit<Vector3<T>>) -> Unit<Vector3<T>> {
        // Rotations preserve norms, so the `Unit` wrapper is kept by the `Mul` impl.
        self.inverse() * v
    }
    /// Appends to `self` a rotation given in the axis-angle form, using a linearized formulation.
    ///
    /// This is a faster, but approximate, way to compute `UnitQuaternion::new(axisangle) * self`.
    #[inline]
    #[must_use]
    pub fn append_axisangle_linearized(&self, axisangle: &Vector3<T>) -> Self {
        // First-order approximation: exp(ω/2) ≈ 1 + ω/2, so the appended rotation
        // becomes q + (ω/2)·q, renormalized to stay on the unit sphere.
        let half: T = crate::convert(0.5);
        let q1 = self.clone().into_inner();
        let q2 = Quaternion::from_imag(axisangle * half);
        Unit::new_normalize(&q1 + q2 * &q1)
    }
}
impl<T: RealField> Default for UnitQuaternion<T> {
    /// The default unit quaternion is the identity rotation.
    fn default() -> Self {
        Self::identity()
    }
}
impl<T: RealField + fmt::Display> fmt::Display for UnitQuaternion<T> {
    /// Formats the rotation as an angle plus, when it is well-defined, a
    /// rotation axis. The identity rotation has no axis.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.axis() {
            Some(axis) => {
                let axis = axis.into_inner();
                write!(
                    f,
                    "UnitQuaternion angle: {} − axis: ({}, {}, {})",
                    self.angle(),
                    axis[0],
                    axis[1],
                    axis[2]
                )
            }
            None => write!(
                f,
                "UnitQuaternion angle: {} − axis: (undefined)",
                self.angle()
            ),
        }
    }
}
// Approximate-equality support: all three impls below simply delegate to the
// underlying quaternion coordinates via `as_ref()`.
impl<T: RealField + AbsDiffEq<Epsilon = T>> AbsDiffEq for UnitQuaternion<T> {
    type Epsilon = T;
    #[inline]
    fn default_epsilon() -> Self::Epsilon {
        T::default_epsilon()
    }
    #[inline]
    fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
        self.as_ref().abs_diff_eq(other.as_ref(), epsilon)
    }
}
impl<T: RealField + RelativeEq<Epsilon = T>> RelativeEq for UnitQuaternion<T> {
    #[inline]
    fn default_max_relative() -> Self::Epsilon {
        T::default_max_relative()
    }
    #[inline]
    fn relative_eq(
        &self,
        other: &Self,
        epsilon: Self::Epsilon,
        max_relative: Self::Epsilon,
    ) -> bool {
        // Component-wise comparison on the raw quaternion.
        self.as_ref()
            .relative_eq(other.as_ref(), epsilon, max_relative)
    }
}
impl<T: RealField + UlpsEq<Epsilon = T>> UlpsEq for UnitQuaternion<T> {
    #[inline]
    fn default_max_ulps() -> u32 {
        T::default_max_ulps()
    }
    #[inline]
    fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
        // ULP-based comparison on the raw quaternion coordinates.
        self.as_ref().ulps_eq(other.as_ref(), epsilon, max_ulps)
    }
}
| 31.693539 | 141 | 0.548799 |
8afa9c1fe7efdd7f36f9423721fe53001b4b7122 | 6,661 | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use crate::Buildable;
use crate::TextTag;
use glib::object::Cast;
use glib::object::IsA;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
// GObject wrapper for `GtkTextTagTable`. This file is generated by gir (see
// the header), so only comments are added here.
glib::wrapper! {
    #[doc(alias = "GtkTextTagTable")]
    pub struct TextTagTable(Object<ffi::GtkTextTagTable, ffi::GtkTextTagTableClass>) @implements Buildable;

    match fn {
        type_ => || ffi::gtk_text_tag_table_get_type(),
    }
}
impl TextTagTable {
    /// Creates a new, empty tag table via the C constructor.
    #[doc(alias = "gtk_text_tag_table_new")]
    pub fn new() -> TextTagTable {
        assert_initialized_main_thread!();
        unsafe { from_glib_full(ffi::gtk_text_tag_table_new()) }
    }
}
impl Default for TextTagTable {
    // Forwards to `new()` so `Default` and `new` stay in sync.
    fn default() -> Self {
        Self::new()
    }
}
// Convenience `None` constant for APIs taking `Option<&TextTagTable>`.
pub const NONE_TEXT_TAG_TABLE: Option<&TextTagTable> = None;
/// Generated extension trait exposing the `GtkTextTagTable` methods for every
/// type that is `IsA<TextTagTable>`.
pub trait TextTagTableExt: 'static {
    #[doc(alias = "gtk_text_tag_table_add")]
    fn add<P: IsA<TextTag>>(&self, tag: &P) -> bool;
    #[doc(alias = "gtk_text_tag_table_foreach")]
    fn foreach<P: FnMut(&TextTag)>(&self, func: P);
    #[doc(alias = "gtk_text_tag_table_get_size")]
    #[doc(alias = "get_size")]
    fn size(&self) -> i32;
    #[doc(alias = "gtk_text_tag_table_lookup")]
    fn lookup(&self, name: &str) -> Option<TextTag>;
    #[doc(alias = "gtk_text_tag_table_remove")]
    fn remove<P: IsA<TextTag>>(&self, tag: &P);
    #[doc(alias = "tag-added")]
    fn connect_tag_added<F: Fn(&Self, &TextTag) + 'static>(&self, f: F) -> SignalHandlerId;
    #[doc(alias = "tag-changed")]
    fn connect_tag_changed<F: Fn(&Self, &TextTag, bool) + 'static>(&self, f: F) -> SignalHandlerId;
    #[doc(alias = "tag-removed")]
    fn connect_tag_removed<F: Fn(&Self, &TextTag) + 'static>(&self, f: F) -> SignalHandlerId;
}
// Generated by gir: each method marshals its arguments with the glib
// `to_glib_none`/`from_glib` helpers and forwards to the corresponding C entry
// point, while the `connect_*` methods install C-ABI trampolines that re-box
// and invoke the user's Rust closure. The file header says DO NOT EDIT, so
// only comments are added here.
impl<O: IsA<TextTagTable>> TextTagTableExt for O {
    fn add<P: IsA<TextTag>>(&self, tag: &P) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_tag_table_add(
                self.as_ref().to_glib_none().0,
                tag.as_ref().to_glib_none().0,
            ))
        }
    }
    fn foreach<P: FnMut(&TextTag)>(&self, func: P) {
        let func_data: P = func;
        // C-ABI shim that recovers the Rust closure from the user-data pointer.
        unsafe extern "C" fn func_func<P: FnMut(&TextTag)>(
            tag: *mut ffi::GtkTextTag,
            data: glib::ffi::gpointer,
        ) {
            let tag = from_glib_borrow(tag);
            let callback: *mut P = data as *const _ as usize as *mut P;
            (*callback)(&tag);
        }
        let func = Some(func_func::<P> as _);
        let super_callback0: &P = &func_data;
        unsafe {
            ffi::gtk_text_tag_table_foreach(
                self.as_ref().to_glib_none().0,
                func,
                super_callback0 as *const _ as usize as *mut _,
            );
        }
    }
    fn size(&self) -> i32 {
        unsafe { ffi::gtk_text_tag_table_get_size(self.as_ref().to_glib_none().0) }
    }
    fn lookup(&self, name: &str) -> Option<TextTag> {
        unsafe {
            from_glib_none(ffi::gtk_text_tag_table_lookup(
                self.as_ref().to_glib_none().0,
                name.to_glib_none().0,
            ))
        }
    }
    fn remove<P: IsA<TextTag>>(&self, tag: &P) {
        unsafe {
            ffi::gtk_text_tag_table_remove(
                self.as_ref().to_glib_none().0,
                tag.as_ref().to_glib_none().0,
            );
        }
    }
    fn connect_tag_added<F: Fn(&Self, &TextTag) + 'static>(&self, f: F) -> SignalHandlerId {
        // Trampoline: converts raw GObject pointers back into safe wrappers
        // before calling the boxed closure.
        unsafe extern "C" fn tag_added_trampoline<
            P: IsA<TextTagTable>,
            F: Fn(&P, &TextTag) + 'static,
        >(
            this: *mut ffi::GtkTextTagTable,
            tag: *mut ffi::GtkTextTag,
            f: glib::ffi::gpointer,
        ) {
            let f: &F = &*(f as *const F);
            f(
                TextTagTable::from_glib_borrow(this).unsafe_cast_ref(),
                &from_glib_borrow(tag),
            )
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"tag-added\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    tag_added_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
    fn connect_tag_changed<F: Fn(&Self, &TextTag, bool) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn tag_changed_trampoline<
            P: IsA<TextTagTable>,
            F: Fn(&P, &TextTag, bool) + 'static,
        >(
            this: *mut ffi::GtkTextTagTable,
            tag: *mut ffi::GtkTextTag,
            size_changed: glib::ffi::gboolean,
            f: glib::ffi::gpointer,
        ) {
            let f: &F = &*(f as *const F);
            f(
                TextTagTable::from_glib_borrow(this).unsafe_cast_ref(),
                &from_glib_borrow(tag),
                from_glib(size_changed),
            )
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"tag-changed\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    tag_changed_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
    fn connect_tag_removed<F: Fn(&Self, &TextTag) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn tag_removed_trampoline<
            P: IsA<TextTagTable>,
            F: Fn(&P, &TextTag) + 'static,
        >(
            this: *mut ffi::GtkTextTagTable,
            tag: *mut ffi::GtkTextTag,
            f: glib::ffi::gpointer,
        ) {
            let f: &F = &*(f as *const F);
            f(
                TextTagTable::from_glib_borrow(this).unsafe_cast_ref(),
                &from_glib_borrow(tag),
            )
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"tag-removed\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    tag_removed_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
}
impl fmt::Display for TextTagTable {
    // Generated Display impl: just the type name, no instance data.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("TextTagTable")
    }
}
| 31.2723 | 107 | 0.519742 |
08a44b917535c467441d29bd6e8fbf1204f380ba | 2,181 | use serde_json::{Value, json};
use crate::error::MatcherError;
/// Modifier that converts a JSON value to a number in place.
///
/// Numbers are left untouched. Strings are parsed, trying `u64`, then `i64`,
/// then `f64` (so values that fit stay integral). Any other JSON type — or a
/// string that parses as none of the three — yields an
/// `ExtractedVariableError` mentioning `variable_name`.
#[inline]
pub fn to_number(variable_name: &str, value: &mut Value) -> Result<(), MatcherError> {
    match value {
        Value::Number(..) => Ok(()),
        Value::String(text) => {
            let parsed = text
                .parse::<u64>()
                .ok()
                .map(|v| json!(v))
                .or_else(|| text.parse::<i64>().ok().map(|v| json!(v)))
                .or_else(|| text.parse::<f64>().ok().map(|v| json!(v)));
            match parsed {
                Some(number) => {
                    *value = number;
                    Ok(())
                }
                None => Err(MatcherError::ExtractedVariableError {
                    message: format!(
                        "The 'to_number' modifier cannot parse string [{}] to number",
                        text
                    ),
                    variable_name: variable_name.to_owned(),
                }),
            }
        }
        _ => Err(MatcherError::ExtractedVariableError {
            message:
                "The 'to_number' modifier can be used only with values of type 'string' or 'number'"
                    .to_owned(),
            variable_name: variable_name.to_owned(),
        }),
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use serde_json::json;

    // Unsigned strings become JSON unsigned integers.
    #[test]
    fn to_number_modifier_should_return_a_positive_number() {
        let mut input = Value::String("12".to_owned());
        to_number("", &mut input).unwrap();
        assert_eq!(json!(12), input);
    }
    // Signed strings fall through to the i64 parse.
    #[test]
    fn to_number_modifier_should_return_a_negative_number() {
        let mut input = Value::String("-3412".to_owned());
        to_number("", &mut input).unwrap();
        assert_eq!(json!(-3412), input);
    }
    // Decimal strings fall through to the f64 parse.
    #[test]
    fn to_number_modifier_should_return_a_float() {
        let mut input = Value::String("3.14".to_owned());
        to_number("", &mut input).unwrap();
        assert_eq!(json!(3.14), input);
    }
    // Non-numeric strings are rejected.
    #[test]
    fn to_number_modifier_should_return_a_error() {
        let mut input = Value::String("something".to_owned());
        assert!(to_number("", &mut input).is_err());
    }
}
| 31.157143 | 100 | 0.513984 |
5b836aec9238588dfebb291692c58a74f817f27a | 100 | //! OS-specific extensions.
// Unix-only extensions, compiled behind the crate's `cfg_unix!` helper macro.
cfg_unix! {
    pub mod unix;
}

// Windows-only counterpart, gated by `cfg_windows!`.
cfg_windows! {
    pub mod windows;
}
| 10 | 27 | 0.62 |
03d4d8f150e2b5bf986aed697ca181633da2de7b | 692 | // tests2.rs
// This test has a problem with it -- make the test compile! Make the test
// pass! Make the test fail! Scroll down for hints :)
#[cfg(test)]
mod tests {
    // Exercise: `assert_eq!` compares its two arguments; both comparisons
    // below hold, so this test passes as written.
    #[test]
    fn you_can_assert_eq() {
        assert_eq!(10, 10);
        assert_eq!(true, true);
    }
}
// Like the previous exercise, you don't need to write any code to get this test to compile and
// run. `assert_eq!` is a macro that takes two arguments and compares them. Try giving it two
// values that are equal! Try giving it two arguments that are different! Try giving it two values
// that are of different types! Try switching which argument comes first and which comes second!
| 15.043478 | 98 | 0.67052 |
fe2e7b7782686da46fad2701c9d70728f4dfc992 | 3,489 | // COPYRIGHT (C) 2017 barreiro. All Rights Reserved.
// Rust solvers for Project Euler problems
use euler::algorithm::long::{digits_sum, floor_sqrt, int_log_10, is_square, pow_10};
use euler::Solver;
// It is well known that if the square root of a natural number is not an integer, then it is irrational.
// The decimal expansion of such square roots is infinite without any repeating pattern at all.
//
// The square root of two is 1.41421356237309504880..., and the digital sum of the first one hundred decimal digits is 475.
//
// For the first one hundred natural numbers, find the total of the digital sums of the first one hundred decimal digits for all the irrational square roots.
// Number of decimal digits to sum for each irrational square root.
const DIM: isize = 100;
// Big numbers below are stored as little-endian chunks in base 10^15.
const THRESHOLD: isize = pow_10(15);
pub struct Solver080 {
    // Inclusive upper bound of the natural numbers whose roots are examined.
    pub n: isize
}
impl Default for Solver080 {
    // The problem statement uses the first one hundred natural numbers.
    fn default() -> Self {
        Solver080 { n: 100 }
    }
}
impl Solver for Solver080 {
    fn solve(&self) -> isize {
        // "Square roots by subtraction" by Frazer Jarvis ( http://www.afjarvis.staff.shef.ac.uk/maths/jarvisspec02.pdf )
        (2..=self.n).filter(|&n| !is_square(n)).map(|n| {
            // `a` and `b` are chunked big numbers; `i` counts the decimal
            // digits still to produce (fewer when the integer part of the
            // root already has several digits).
            let (mut a, mut b, mut i) = (vec![5 * n], vec![5], DIM - int_log_10(floor_sqrt(n)));
            loop {
                if less(&a, &b) { // first branch fixes a digit of the root in b
                    if i == 0 { break; } else { i -= 1; }
                    insert_zero(&mut b);
                    mul_scalar(&mut a, 100);
                } else {
                    // Repeated subtraction drives the digit extraction.
                    sub(&mut a, &b);
                    add_scalar(&mut b, 10);
                }
            }
            b.iter().map(|&d| digits_sum(d)).sum::<isize>() - 5 // b ends with an extra '5'
        }).sum()
    }
}
// -- //
// Compares two big numbers stored as little-endian chunks (both normalized,
// i.e. no leading zero chunks); returns true when `a` < `b`.
fn less(a: &[isize], b: &[isize]) -> bool {
    if a.len() != b.len() {
        // Normalized representation: more chunks means a larger number.
        return a.len() < b.len();
    }
    // Equal length: scan from the most significant chunk down to the first
    // difference; equal numbers are not "less".
    a.iter()
        .zip(b.iter())
        .rev()
        .find(|(x, y)| x != y)
        .map(|(x, y)| x < y)
        .unwrap_or(false)
}
// add a zero just before the final digit (which will always be '5')
fn insert_zero(a: &mut Vec<isize>) {
    // Clear the trailing '5' (truncate, then re-append a 0), shift the whole
    // number one decimal digit left, and restore the trailing '5'.
    a[0] /= 10;
    a[0] *= 10;
    mul_scalar(a, 10);
    a[0] += 5;
}
// convenience function that calculates a *= c where c is *not* a vector number
fn mul_scalar(a: &mut Vec<isize>, c: isize) {
    // Multiply each chunk, then propagate carries from least to most
    // significant, growing the vector if the top chunk overflows.
    a.iter_mut().for_each(|i| *i *= c);
    for i in 0..a.len() {
        if a[i] >= THRESHOLD {
            if i == a.len() - 1 { a.push(a[i] / THRESHOLD) } else { a[i + 1] += a[i] / THRESHOLD }
            a[i] %= THRESHOLD;
        }
    }
}
// convenience function that calculates a -= b
fn sub(a: &mut Vec<isize>, b: &[isize]) {
    while a.len() < b.len() { a.push(0); }
    for i in 0..b.len() {
        a[i] -= b[i];
        if a[i] < 0 {
            // Borrow from the next chunk.
            a[i] += THRESHOLD;
            a[i + 1] -= 1;
        } else if a[i] >= THRESHOLD {
            // Propagate a carry (possible when a chunk was already oversized).
            if i == a.len() - 1 { a.push(a[i] / THRESHOLD) } else { a[i + 1] += a[i] / THRESHOLD }
            a[i] %= THRESHOLD;
        }
    }
    // need to normalize in order to be able to compare based on length
    while !a.is_empty() && *a.last().unwrap() == 0 {
        a.pop();
    }
}
// convenience function that calculates a += c where c is *not* a vector number
fn add_scalar(a: &mut Vec<isize>, c: isize) {
    a[0] += c;
    // Ripple the carry upward until a chunk fits below THRESHOLD.
    let mut i = 0;
    while a[i] >= THRESHOLD {
        if i == a.len() - 1 { a.push(a[i] / THRESHOLD) } else { a[i + 1] += a[i] / THRESHOLD }
        a[i] %= THRESHOLD;
        i += 1;
    }
}
| 32.915094 | 157 | 0.536257 |
f55ecdfd09f2763b3c12fc85a7776738829c621c | 7,448 | /// Create a few trees, a light source and a camera, and position them such that the trees are lit
/// and visible from the camera. This code contains code copied from pyramid.rs.
use bevy::prelude::*;
use bevy::render::mesh::{Indices, Mesh};
use bevy::math::Vec3;
use bevy::render::render_resource::PrimitiveTopology;
/// Startup system: creates the materials, spawns three trees, a point light
/// and a perspective camera positioned to see the lit trees.
fn setup(
    mut commands: Commands,
    mut meshes: ResMut<Assets<Mesh>>,
    mut materials: ResMut<Assets<StandardMaterial>>,
) {
    // Create and add a default material
    let mut material_handle_crown = materials.add(StandardMaterial {
        base_color: Color::rgb(0.3, 0.8, 0.3),
        ..Default::default()
    });
    let mut material_handle_trunk = materials.add(StandardMaterial {
        base_color: Color::rgb(0.5, 0.3, 0.3),
        ..Default::default()
    });

    // Two similar trees near the camera and a taller, thinner one behind.
    create_tree(&mut commands, &mut meshes, &mut material_handle_crown, &mut material_handle_trunk,
                1., 0.3, 2.3, 0.8, Vec3::new(0.5, 0., -8.));
    create_tree(&mut commands, &mut meshes, &mut material_handle_crown, &mut material_handle_trunk,
                1., 0.3, 2.3, 0.8, Vec3::new(-0.5, 0., -7.));
    create_tree(&mut commands, &mut meshes, &mut material_handle_crown, &mut material_handle_trunk,
                1., 0.25, 3.5, 0.6, Vec3::new(-1.5, 0., -10.));

    // Light
    commands.spawn_bundle(PointLightBundle {
        transform: Transform::from_xyz(2., 5., 2.),
        ..Default::default()
    });
    // Camera
    commands.spawn_bundle(PerspectiveCameraBundle {
        transform: Transform::from_xyz(0., 1.5, 0.),
        ..Default::default()
    });
}
/// Construct a tree with a pyramid for the crown and a shape::Box for the trunk. The height and
/// width of each are passed as parameters. The bottom of the trunk is positioned at `location`.
/// Materials for the crown and trunk are passed in `material_handle_crown' and
/// `material_handle_trunk`. The tree and its meshes are added to `commands` and `meshes`.
fn create_tree(
    commands: &mut Commands,
    meshes: &mut ResMut<Assets<Mesh>>,
    material_handle_crown: &Handle<StandardMaterial>,
    material_handle_trunk: &Handle<StandardMaterial>,
    trunk_height: f32,
    trunk_width: f32,
    crown_height: f32,
    crown_width: f32,
    location: Vec3,
) {
    // Create a mesh for the tree trunk
    commands.spawn_bundle(PbrBundle {
        mesh: meshes.add(Mesh::from(shape::Box::new(trunk_width, trunk_height, trunk_width))),
        material: material_handle_trunk.clone(),
        // The box is centered on its transform, so lift it by half its height
        // to place the trunk's bottom exactly at `location`.
        transform: Transform::from_translation(location + Vec3::new(0., trunk_height / 2.0, 0.)),
        ..Default::default()
    });
    // Create a mesh for the tree top
    commands.spawn_bundle(PbrBundle {
        mesh: meshes.add(Mesh::from(Pyramid::new(8, crown_width, crown_height))),
        material: material_handle_crown.clone(),
        // The pyramid's base sits on its transform, so the crown starts where
        // the trunk ends.
        transform: Transform::from_translation(location + Vec3::new(0., trunk_height, 0.)),
        ..Default::default()
    });
}
/// Entry point: installs the default Bevy plugins and the one-shot `setup`
/// startup system, then runs the app loop.
fn main() {
    App::new()
        .add_plugins(DefaultPlugins)
        .add_startup_system(setup)
        .run();
}
/* Everything below this line is intended to be in a separate file analogous to those in:
* bevy_render/src/mesh/shape/torus.rs
*/
/// A pyramid with a base in the XZ plane centered on the origin and its apex along +Y.
#[derive(Debug, Clone, Copy)]
pub struct Pyramid {
    // Number of base edges; must be at least 3 (see `Pyramid::new`).
    pub sides: u32,
    // Length of each base edge.
    pub side_length: f32,
    // Distance from the base plane to the apex.
    pub height: f32,
}
impl Pyramid {
    /// Builds a pyramid.
    ///
    /// # Panics
    /// Panics if `sides < 3`, since a polygonal base needs three edges.
    pub fn new(sides: u32, side_length: f32, height: f32) -> Self {
        assert!(sides > 2, "Pyramids must have 3 or more sides");
        Pyramid { sides, side_length, height }
    }
}
impl Default for Pyramid {
    // A unit-size, four-sided pyramid.
    fn default() -> Self {
        Pyramid {
            sides: 4,
            side_length: 1.0,
            height: 1.0,
        }
    }
}
/// Triangulates the pyramid: `sides` lateral faces sharing the apex, plus a
/// fan of `sides - 2` triangles for the bottom face. Each face gets flat
/// normals and simple per-face UVs.
impl From<Pyramid> for Mesh {
    fn from(p: Pyramid) -> Self {
        let angle = std::f32::consts::PI * 2. / p.sides as f32;
        let half_width = p.side_length / 2.;
        // Circumradius of the regular base polygon from its edge length.
        let radius = half_width / f32::sin(angle / 2.);
        let apex = Vec3::new(0., p.height, 0.);

        // Calculate vertexes forming each face. The first vertex is located on the positive Z axis
        // and faces are created counter-clockwise (looking down the Y axis towards negative Y.
        let mut base_vertexes = Vec::with_capacity(p.sides as usize);
        for s in 0..p.sides {
            let a = angle * s as f32;
            base_vertexes.push(Vec3::new(radius * f32::sin(a), 0., radius * f32::cos(a)));
        }
        let mut vertexes = Vec::new();
        let mut bottom_vertexes = Vec::new();
        for s in 0..p.sides as usize {
            // Determine normal by creating two vectors from the apex to the two other corners of
            // this face, calculating their cross product and normalizing the result.
            let b = &base_vertexes[s];
            let c = &base_vertexes[(s + 1) % p.sides as usize];
            let ver_ab = *b - apex;
            let ver_ac = *c - apex;
            let normal = ver_ab.cross(ver_ac).normalize().to_array();
            vertexes.push((apex.to_array(), normal, [0.5, 1.]));
            vertexes.push((b.to_array(), normal, [0., 0.]));
            vertexes.push((c.to_array(), normal, [1., 0.]));
            bottom_vertexes.push(b);
        }

        // Translate a `Vec3` position on the bottom face to u, v coordinates returned as an
        // array. `limit` is the largest absolute distance that the position can be from the
        // origin. This function therefore translates -limit..=limit to 0..=1 for both axes.
        fn xz_to_uv(pos: &Vec3, limit: f32) -> [f32; 2] {
            [ (pos.x + limit) / (limit * 2.),
              (pos.z + limit) / (limit * 2.),
            ]
        }

        // Vertexes for the bottom face were saved in a counter-clockwise direction when looking
        // from +Y to the origin. Their order is reversed so they are CCW when looking at the
        // bottom face of the pyramid from -Y.
        bottom_vertexes.reverse();
        // The last vertex in the list is the one nearest +Z. It is used as the first vertex in all
        // triangles forming the bottom face.
        let vertex_nearest_pos_z = bottom_vertexes.pop().unwrap();
        let texture_bound = vertex_nearest_pos_z.z;
        for pair in bottom_vertexes.windows(2) {
            let normal = [0., -1., 0.];
            vertexes.push((vertex_nearest_pos_z.to_array(), normal, [0.5, 1.]));
            vertexes.push((pair[0].to_array(), normal, xz_to_uv(pair[0], texture_bound)));
            vertexes.push((pair[1].to_array(), normal, xz_to_uv(pair[1], texture_bound)));
        }

        // 3 * sides (lateral) + 3 * (sides - 2) (bottom fan) = 6 * sides - 6.
        let num_vertexes = 6 * p.sides - 6;
        let mut positions = Vec::with_capacity(num_vertexes as usize);
        let mut normals = Vec::with_capacity(num_vertexes as usize);
        let mut uvs = Vec::with_capacity(num_vertexes as usize);
        for (position, normal, uv) in vertexes.iter() {
            positions.push(*position);
            normals.push(*normal);
            uvs.push(*uv);
        }
        let mut mesh = Mesh::new(PrimitiveTopology::TriangleList);
        mesh.insert_attribute(Mesh::ATTRIBUTE_POSITION, positions);
        mesh.insert_attribute(Mesh::ATTRIBUTE_NORMAL, normals);
        mesh.insert_attribute(Mesh::ATTRIBUTE_UV_0, uvs);
        mesh.set_indices(Some(Indices::U32((0..num_vertexes).collect())));
        mesh
    }
}
| 36.509804 | 99 | 0.617884 |
bbc9ee317e2afac59afb143b82b586db04554414 | 1,484 | use clap::{Arg, Command};
use std::error::Error;
/// Parsed command-line options for the binary.
#[derive(Clone, Default)]
pub struct Argument {
    // Path to the config file (required `-c/--config-file` flag).
    pub config_file: String,
    // Whether the UI should be shown (`-u/--show-ui` flag).
    pub show_ui: bool,
    // Crate version plus the `build` env identifier, filled by `parse`.
    pub version_info: String,
}
impl Argument {
    /// Creates an `Argument` with every field at its default value.
    pub fn new() -> Self {
        Argument {
            ..Default::default()
        }
    }

    /// Parses the process command line with `clap`, filling `version_info`,
    /// `config_file` and `show_ui`.
    ///
    /// Note that `clap` itself terminates the process on invalid input, so
    /// this normally returns `Ok(())`.
    pub fn parse(&mut self) -> Result<(), Box<dyn Error>> {
        self.version_info =
            concat!(env!("CARGO_PKG_VERSION"), "-build-", env!("build")).to_string();
        let matches = Command::new("ninja")
            .version(&*self.version_info)
            .arg(
                Arg::new("config_file")
                    .short('c')
                    .long("config-file")
                    .value_name("NAME")
                    .help("Config file (.yml)")
                    .takes_value(true)
                    .required(true),
            )
            .arg(
                Arg::new("show_ui")
                    .short('u')
                    .long("show-ui")
                    .help("Show UI")
                    .takes_value(false)
                    .required(false),
            )
            .get_matches();

        // `config_file` is declared `required`, so it is normally present;
        // fall back to "" defensively (same behavior as before).
        self.config_file = matches.value_of("config_file").unwrap_or("").to_string();
        // `is_present` already yields the boolean we need — no if/else dance.
        self.show_ui = matches.is_present("show_ui");
        Ok(())
    }
}
| 26.035088 | 85 | 0.440027 |
type T = u32;

// `uint` was the pre-1.0 name of the pointer-sized unsigned integer type;
// modern Rust spells it `usize`. The lower-case constant name is kept for
// compatibility with existing users.
#[allow(non_upper_case_globals)]
const bits: usize = 32;
e2c020eb54e830013bb5a6d61f342ce8278581e0 | 380 | // Take a look at the license at the top of the repository in the LICENSE file.
// Inline boxed wrapper around the C `GdkTimeCoord` struct.
glib::wrapper! {
    #[doc(alias = "GdkTimeCoord")]
    pub struct TimeCoord(BoxedInline<ffi::GdkTimeCoord>);
}
impl TimeCoord {
    /// Returns the raw `time` field of the underlying C struct.
    pub fn time(&self) -> u32 {
        self.inner.time
    }

    /// Returns the raw axis values of the underlying C struct
    /// (fixed-size array of `GDK_MAX_TIMECOORD_AXES` doubles).
    pub fn axes(&self) -> &[f64; ffi::GDK_MAX_TIMECOORD_AXES as usize] {
        &self.inner.axes
    }
}
bbd4fdd9daa7d437036f77fdd51c1c48b2c3e670 | 849 | use std::{
env,
env::current_dir,
error::Error,
fs::{read_to_string, File},
io::{BufWriter, Write},
path::Path,
};
/// Build script: copies the pre-built `tauri.bundle.umd.js` from the sibling
/// `api` package into `OUT_DIR`, or writes a stub that fails loudly at
/// runtime when the JS API has not been compiled yet.
pub fn main() -> Result<(), Box<dyn Error>> {
  // OUT_DIR is provided by Cargo for build scripts.
  let out_dir = env::var("OUT_DIR")?;

  let dest_bundle_umd_path = Path::new(&out_dir).join("tauri.bundle.umd.js");
  let mut bundle_umd_file = BufWriter::new(File::create(&dest_bundle_umd_path)?);

  // Bundle produced by `yarn build` in ../../api.
  let bundle_umd_path = current_dir()?.join("../../api/dist/tauri.bundle.umd.js");
  println!("cargo:rerun-if-changed={:?}", bundle_umd_path);

  if let Ok(bundle_umd_js) = read_to_string(bundle_umd_path) {
    // Copy the compiled bundle verbatim.
    write!(bundle_umd_file, "{}", bundle_umd_js)?;
  } else {
    // No compiled bundle: emit a JS stub that throws with instructions.
    write!(
      bundle_umd_file,
      r#"throw new Error("you are trying to use the global Tauri script but the @tauri-apps/api package wasn't compiled; run `yarn build` first")"#
    )?;
  }
  Ok(())
}
| 29.275862 | 147 | 0.656066 |
508bda6b3310868a72fef49ec35a1e04cf604800 | 7,802 | use std::fs;
use std::path::PathBuf;
use linked_hash_map::LinkedHashMap;
use log::debug;
use pest::error::Error as PestError;
use pest::iterators::Pair;
use pest::Parser;
use crate::config::VariableInfo;
pub type AnswerInfo = VariableInfo;
/// A collection of pre-supplied answers, keyed by identifier.
/// `LinkedHashMap` preserves the insertion order of the answers.
#[derive(Debug, Deserialize, Serialize)]
pub struct AnswerConfig {
    #[serde(skip_serializing_if = "LinkedHashMap::is_empty")]
    answers: LinkedHashMap<String, AnswerInfo>,
}
/// Errors produced while locating or parsing an answer config file.
#[derive(Debug, PartialEq, thiserror::Error)]
pub enum AnswerConfigError {
    #[error("Error parsing answer config: {0}")]
    ParseError(String),
    #[error("Missing answer config")]
    MissingError,
}
// YAML deserialization failures surface as parse errors.
impl From<serde_yaml::Error> for AnswerConfigError {
    fn from(error: serde_yaml::Error) -> Self {
        AnswerConfigError::ParseError(error.to_string())
    }
}
// Any I/O failure is currently reported as a missing config.
impl From<std::io::Error> for AnswerConfigError {
    fn from(_: std::io::Error) -> Self {
        // TODO: Distinguish between missing and other errors
        AnswerConfigError::MissingError
    }
}
impl AnswerConfig {
    /// Loads an answer config from `path`.
    ///
    /// If `path` is a directory, the first existing file among the well-known
    /// answer-file names wins; otherwise `path` itself is read and parsed as
    /// YAML. Returns `MissingError` when nothing usable is found.
    pub fn load<P: Into<PathBuf>>(path: P) -> Result<AnswerConfig, AnswerConfigError> {
        let path = path.into();
        if path.is_dir() {
            // Candidate file names, checked in priority order.
            let answer_file_names = vec![
                "archetect.yml",
                ".archetect.yml",
                "archetect.yaml",
                ".archetect.yaml",
                ".answers.yaml",
                "answers.yaml",
            ];
            for answer_file_name in answer_file_names {
                let answers = path.join(answer_file_name);
                if answers.exists() {
                    debug!("Reading Archetect config from '{}'", &answers.display());
                    let config = fs::read_to_string(answers)?;
                    let config = serde_yaml::from_str::<AnswerConfig>(&config)?;
                    return Ok(config);
                }
            }
        } else {
            let config = fs::read_to_string(path)?;
            let config = serde_yaml::from_str::<AnswerConfig>(&config)?;
            return Ok(config);
        }
        // TODO: Return Ok(None) instead of error
        Err(AnswerConfigError::MissingError)
    }

    /// Records an answer for `identifier` with the given `value`.
    pub fn add_answer(&mut self, identifier: &str, value: &str) {
        self.answers
            .insert(identifier.to_owned(), AnswerInfo::with_value(value).build());
    }

    /// Builder-style variant of `add_answer`, consuming and returning `self`.
    pub fn with_answer(mut self, identifier: &str, value: &str) -> AnswerConfig {
        self.add_answer(identifier, value);
        self
    }

    /// Read-only view of the collected answers.
    pub fn answers(&self) -> &LinkedHashMap<String, AnswerInfo> {
        &self.answers
    }
}
impl Default for AnswerConfig {
    // Starts with no answers.
    fn default() -> Self {
        AnswerConfig {
            answers: LinkedHashMap::new(),
        }
    }
}
// pest parser generated from the grammar file referenced below.
#[derive(Parser)]
#[grammar = "config/answer_grammar.pest"]
struct AnswerParser;
/// Error returned when a `key=value` answer string cannot be parsed.
#[derive(Debug, PartialEq)]
pub enum AnswerParseError {
    PestError(PestError<Rule>),
}
// Wraps the raw pest error so `?` can be used in `parse`.
impl From<PestError<Rule>> for AnswerParseError {
    fn from(error: PestError<Rule>) -> Self {
        AnswerParseError::PestError(error)
    }
}
// Parses a single `identifier=value` answer string into its parts.
fn parse(source: &str) -> Result<(String, AnswerInfo), AnswerParseError> {
    let mut pairs = AnswerParser::parse(Rule::answer, source)?;
    Ok(parse_answer(pairs.next().unwrap()))
}
// Destructures an `answer` pair into its identifier and value children.
fn parse_answer(pair: Pair<Rule>) -> (String, AnswerInfo) {
    assert_eq!(pair.as_rule(), Rule::answer);
    let mut iter = pair.into_inner();
    let identifier_pair = iter.next().unwrap();
    let value_pair = iter.next().unwrap();
    (
        parse_identifier(identifier_pair),
        AnswerInfo::with_value(parse_value(value_pair)).build(),
    )
}
// Extracts the identifier text from an `identifier` pair.
fn parse_identifier(pair: Pair<Rule>) -> String {
    assert_eq!(pair.as_rule(), Rule::identifier);
    pair.as_str().to_owned()
}
// Extracts the inner (unquoted) text from a `string` pair.
fn parse_value(pair: Pair<Rule>) -> String {
    assert_eq!(pair.as_rule(), Rule::string);
    pair.into_inner().next().unwrap().as_str().to_owned()
}
impl AnswerInfo {
    /// Public entry point for parsing a `key=value` answer string.
    pub fn parse(input: &str) -> Result<(String, AnswerInfo), AnswerParseError> {
        parse(input)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Covers the accepted `key=value` syntaxes: bare, single-quoted,
    // double-quoted, with and without surrounding whitespace, and empty
    // values.
    #[test]
    fn test_parse_success() {
        assert_eq!(
            parse("key=value"),
            Ok(("key".to_owned(), AnswerInfo::with_value("value").build()))
        );
        assert_eq!(
            parse("key = value"),
            Ok(("key".to_owned(), AnswerInfo::with_value("value").build()))
        );
        assert_eq!(
            parse("key = value set"),
            Ok(("key".to_owned(), AnswerInfo::with_value("value set").build()))
        );
        assert_eq!(
            parse("key='value'"),
            Ok(("key".to_owned(), AnswerInfo::with_value("value").build()))
        );
        assert_eq!(
            parse("key='value set'"),
            Ok(("key".to_owned(), AnswerInfo::with_value("value set").build()))
        );
        assert_eq!(
            parse("key = 'value'"),
            Ok(("key".to_owned(), AnswerInfo::with_value("value").build()))
        );
        assert_eq!(
            parse("key=\"value\""),
            Ok(("key".to_owned(), AnswerInfo::with_value("value").build()))
        );
        assert_eq!(
            parse("key=\"value set\""),
            Ok(("key".to_owned(), AnswerInfo::with_value("value set").build()))
        );
        assert_eq!(
            parse("key = \"value\""),
            Ok(("key".to_owned(), AnswerInfo::with_value("value").build()))
        );
        assert_eq!(
            parse("key ="),
            Ok(("key".to_owned(), AnswerInfo::with_value("").build()))
        );
        assert_eq!(
            parse("key =''"),
            Ok(("key".to_owned(), AnswerInfo::with_value("").build()))
        );
        assert_eq!(
            parse(" key =\"\""),
            Ok(("key".to_owned(), AnswerInfo::with_value("").build()))
        );
    }

    // A bare identifier without '=' must be rejected.
    #[test]
    fn test_parse_fail() {
        match parse("key") {
            Err(AnswerParseError::PestError(_)) => (),
            _ => panic!("Error expected"),
        }
    }

    // Exercises `parse_answer` directly on pest pairs.
    #[test]
    fn test_parse_answer() {
        assert_eq!(
            parse_answer(AnswerParser::parse(Rule::answer, "key=value").unwrap().next().unwrap()),
            ("key".to_owned(), AnswerInfo::with_value("value").build())
        );
        assert_eq!(
            parse_answer(
                AnswerParser::parse(Rule::answer, "key='value'")
                    .unwrap()
                    .next()
                    .unwrap()
            ),
            ("key".to_owned(), AnswerInfo::with_value("value").build())
        );
        assert_eq!(
            parse_answer(
                AnswerParser::parse(Rule::answer, "key=\"value\"")
                    .unwrap()
                    .next()
                    .unwrap()
            ),
            ("key".to_owned(), AnswerInfo::with_value("value").build())
        );
    }

    #[test]
    fn test_parse_identifier() {
        assert_eq!(
            parse_identifier(AnswerParser::parse(Rule::identifier, "key").unwrap().next().unwrap()),
            "key"
        );
    }

    // Quoting style must not leak into the extracted value.
    #[test]
    fn test_parse_value() {
        assert_eq!(
            parse_value(AnswerParser::parse(Rule::string, "value").unwrap().next().unwrap()),
            "value"
        );
        assert_eq!(
            parse_value(AnswerParser::parse(Rule::string, "\"value\"").unwrap().next().unwrap()),
            "value"
        );
        assert_eq!(
            parse_value(AnswerParser::parse(Rule::string, "'value'").unwrap().next().unwrap()),
            "value"
        );
    }

    // Smoke test: serialization just needs to not panic (output is printed).
    #[test]
    fn test_serialize_answer_config() {
        let config = AnswerConfig::default()
            .with_answer("name", "Order Service")
            .with_answer("author", "Jane Doe");
        println!("{}", serde_yaml::to_string(&config).unwrap());
    }
}
db254e67368e8f800b44b79e0c52fc89b816b4c1 | 3,214 | use std::fmt::Debug;
use thiserror::Error;
/// Top-level error type of the crate, aggregating configuration, builder and
/// HTTP failures.
#[derive(Error, Debug, PartialEq, Eq)]
pub enum AlgonautError {
    /// URL parse error.
    #[error("Url parsing error.")]
    BadUrl(String),
    /// Token parse error.
    #[error("Token parsing error.")]
    BadToken,
    /// Header parse error.
    #[error("Headers parsing error.")]
    BadHeader(String),
    /// Missing the base URL of the REST API server.
    #[error("Set an URL before calling build.")]
    UnitializedUrl,
    /// Missing the authentication token for the REST API server.
    #[error("Set a token before calling build.")]
    UnitializedToken,
    /// HTTP calls errors
    #[error("http error: {0}")]
    Request(RequestError),
    /// Internal errors (please open an [issue](https://github.com/manuelmauro/algonaut/issues)!)
    #[error("Internal error: {0}")]
    Internal(String),
}
/// An HTTP request failure, carrying the target URL when it is known.
#[derive(Error, Debug, PartialEq, Eq)]
#[error("{:?}, {}", url, details)]
pub struct RequestError {
    pub url: Option<String>,
    pub details: RequestErrorDetails,
}
impl RequestError {
    /// Bundles a URL (if known) with the failure details.
    pub fn new(url: Option<String>, details: RequestErrorDetails) -> RequestError {
        RequestError { url, details }
    }
}
/// The specific way an HTTP request failed.
#[derive(Error, Debug, PartialEq, Eq)]
pub enum RequestErrorDetails {
    /// Http call error with optional message (returned by remote API)
    #[error("Http error: {}, {}", status, message)]
    Http { status: u16, message: String },
    /// Timeout
    #[error("Timeout connecting to the server.")]
    Timeout,
    /// Client generated errors (while e.g. building request or decoding response)
    #[error("Client error: {}", description)]
    Client { description: String },
}
// Maps each variant of the lower-level client error onto this crate's
// public error type.
impl From<algonaut_client::error::ClientError> for AlgonautError {
    fn from(error: algonaut_client::error::ClientError) -> Self {
        match error {
            algonaut_client::error::ClientError::BadUrl(msg) => AlgonautError::BadUrl(msg),
            algonaut_client::error::ClientError::BadToken => AlgonautError::BadToken,
            algonaut_client::error::ClientError::BadHeader(msg) => AlgonautError::BadHeader(msg),
            algonaut_client::error::ClientError::Request(e) => AlgonautError::Request(e.into()),
        }
    }
}
impl From<algonaut_client::error::RequestError> for RequestError {
fn from(error: algonaut_client::error::RequestError) -> Self {
RequestError::new(error.url.clone(), error.details.into())
}
}
impl From<algonaut_client::error::RequestErrorDetails> for RequestErrorDetails {
fn from(details: algonaut_client::error::RequestErrorDetails) -> Self {
match details {
algonaut_client::error::RequestErrorDetails::Http { status, message } => {
RequestErrorDetails::Http { status, message }
}
algonaut_client::error::RequestErrorDetails::Timeout => RequestErrorDetails::Timeout {},
algonaut_client::error::RequestErrorDetails::Client { description } => {
RequestErrorDetails::Client { description }
}
}
}
}
impl From<rmp_serde::encode::Error> for AlgonautError {
fn from(error: rmp_serde::encode::Error) -> Self {
AlgonautError::Internal(error.to_string())
}
}
| 35.318681 | 100 | 0.651213 |
6932a01aa1c37598ed935abf5f5d86ec5258d22b | 2,341 | #![feature(const_string_new)]
mod enclave_u;
#[cfg(feature = "sgx-test")]
mod test;
use crate::enclave_u::init_connection;
use enclave_u::run_server;
use enclave_u_common::enclave_u::init_enclave;
use log::{error, info, warn};
use sgx_types::sgx_status_t;
use sgx_urts::SgxEnclave;
use std::env;
use std::net::TcpListener;
use std::os::unix::io::AsRawFd;
use std::time::Duration;
/// Read/write timeout (seconds) applied to every client connection.
const TIMEOUT_SEC: u64 = 5;
/// Initialize the SGX query enclave. Panics when enclave initialization
/// fails, since the server cannot run without it.
pub fn start_enclave() -> SgxEnclave {
    let enclave = init_enclave(true)
        .unwrap_or_else(|e| panic!("[-] Init Query Enclave Failed {}!", e.as_str()));
    info!("[+] Init Query Enclave Successful {}!", enclave.geteid());
    enclave
}
/// Test build: run the integration suite instead of serving queries.
#[cfg(feature = "sgx-test")]
fn main() {
    test::test_integration();
}
/// Production build: serve TX decryption queries over TCP, handing each
/// accepted connection's file descriptor into the enclave.
#[cfg(not(feature = "sgx-test"))]
fn main() {
    env_logger::init();
    // args[1]: TCP listen address; args[2]: ZMQ connection string for the
    // tx-validation server.
    let args: Vec<String> = env::args().collect();
    if args.len() < 3 {
        error!("Please provide the address:port to listen on (e.g. \"0.0.0.0:3443\") as the first argument and the ZMQ connection string (e.g. \"ipc://enclave.ipc\" or \"tcp://127.0.0.1:25933\") of the tx-validation server as the second");
        return;
    }
    init_connection(&args[2]);
    let enclave = start_enclave();
    info!("Running TX Decryption Query server...");
    let listener = TcpListener::bind(&args[1]).expect("failed to bind the TCP socket");
    // FIXME: thread pool + rate-limiting
    // Connections are handled one at a time on this thread; timeouts bound
    // how long a stalled client can block the loop.
    for stream in listener.incoming() {
        match stream {
            Ok(stream) => {
                info!("new client connection");
                // Timeout-setting failures are deliberately ignored (best effort).
                let _ = stream.set_read_timeout(Some(Duration::new(TIMEOUT_SEC, 0)));
                let _ = stream.set_write_timeout(Some(Duration::new(TIMEOUT_SEC, 0)));
                let mut retval = sgx_status_t::SGX_SUCCESS;
                // ECALL into the enclave with the raw fd; the fd itself stays
                // owned (and is closed) by `stream` on this side.
                let result =
                    unsafe { run_server(enclave.geteid(), &mut retval, stream.as_raw_fd()) };
                match result {
                    sgx_status_t::SGX_SUCCESS => {
                        info!("client query finished");
                    }
                    e => {
                        warn!("client query failed: {}", e);
                    }
                }
            }
            Err(e) => {
                warn!("connection failed: {}", e);
            }
        }
    }
}
| 30.402597 | 239 | 0.540367 |
61bfd38c5f4c8ed6798504c371c2f395d7a3e66a | 793 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:squirrelcupcake
fn cmp() -> isize {
    // The scrutinee is fixed, so the first arm always matches and this
    // function unconditionally panics with "squirrelcupcake"; the remaining
    // arms exist only to keep the match exhaustive.
    let pair = (Some('a'), None::<char>);
    match pair {
        (Some(_), _) => panic!("squirrelcupcake"),
        (_, Some(_)) => panic!(),
        _ => panic!("wat"),
    }
}
fn main() {
    // `cmp` always panics, so this line is never actually reached.
    let value = cmp();
    println!("{}", value);
}
| 27.344828 | 68 | 0.586381 |
03951c2233e0285afae5602a29636d044a0a468d | 6,627 | use std::fmt::Write;
use types::*;
/// Side-to-move field of an SFEN record: "b" for black, "w" for white.
pub fn color(c: Color) -> String {
    String::from(match c {
        Color::Black => "b",
        Color::White => "w",
    })
}
/// SFEN letter for a piece (lower case; promoted pieces carry a `+` prefix).
pub fn piece(p: Piece) -> String {
    match p {
        Piece::Pawn => "p",
        Piece::Lance => "l",
        Piece::Knight => "n",
        Piece::Silver => "s",
        Piece::Gold => "g",
        Piece::Bishop => "b",
        Piece::Rook => "r",
        Piece::King => "k",
        Piece::PPawn => "+p",
        Piece::PLance => "+l",
        Piece::PKnight => "+n",
        Piece::PSilver => "+s",
        // In standard shogi/SFEN the Horse is the promoted bishop (+b) and
        // the Dragon the promoted rook (+r); the previous arms had the two
        // letters swapped.
        Piece::Horse => "+b",
        Piece::Dragon => "+r",
    }
    .to_string()
}
/// SFEN piece letter with ownership encoded in case: black pieces are
/// upper-cased, white pieces stay lower-case.
pub fn piece_with_color(c: Color, p: Piece) -> String {
    let lower = piece(p);
    if let Color::Black = c {
        lower.to_uppercase()
    } else {
        lower
    }
}
/// Encode the board in SFEN: ranks top to bottom joined by `/`, files
/// scanned from 9 down to 1, with runs of empty squares collapsed into a
/// single digit.
pub fn board(b: &Board) -> String {
    // `n` counts consecutive empty squares within the current rank.
    let mut n = 0;
    let mut ret = String::new();
    for j in 1..10 {
        for i in (1..10).rev() {
            match b[Point::one_start(i, j)] {
                Some((c, p)) => {
                    // Flush any pending empty-square run before the piece.
                    if n > 0 {
                        write!(ret, "{}", n).unwrap();
                        n = 0;
                    }
                    write!(ret, "{}", piece_with_color(c, p)).unwrap();
                }
                None => n += 1,
            }
        }
        // Flush a run of empty squares that reaches the end of the rank.
        if n > 0 {
            write!(ret, "{}", n).unwrap();
            n = 0;
        }
        if j != 9 {
            write!(ret, "/").unwrap();
        }
    }
    ret
}
/// Encode pieces in hand: `-` when empty, otherwise the count (omitted when
/// it is 1) followed by the piece letter — black in upper case, white in
/// lower case (see `piece_with_color`).
pub fn captured(captured: &Captured) -> String {
    let mut ret = String::new();
    if captured.is_empty() {
        write!(ret, "-").unwrap();
    }
    for (&c, cc) in captured.to_inner() {
        for (&p, &n) in cc {
            if n == 1 {
                write!(ret, "{}", piece_with_color(c, p)).unwrap();
            } else if n > 1 {
                write!(ret, "{}{}", n, piece_with_color(c, p)).unwrap();
            }
        }
    }
    ret
}
/// Assemble a full SFEN record: board, side to move, pieces in hand and a
/// fixed move counter of 1.
pub fn sfen(p: &Position) -> String {
    format!(
        "{} {} {} 1",
        board(p.board()),
        color(p.color()),
        captured(p.captured())
    )
}
/// Map a rank number 1-9 to its USI letter 'a'-'i'; any other value yields
/// an empty string.
fn dan(i: u8) -> String {
    match i {
        1..=9 => ((b'a' + i - 1) as char).to_string(),
        _ => String::new(),
    }
}
/// Encode a move in USI coordinate notation, e.g. `7g7f` (board move),
/// `P*7f` (drop), with a trailing `+` for promotions.
/// Points appear to be zero-based internally (hence the `+ 1`) —
/// NOTE(review): confirm against the `Point` type's convention.
pub fn enc_move(m: &Move) -> String {
    let mut ret = String::new();
    match m.from() {
        // Board move: origin square as <file digit><rank letter>.
        Some(p) => write!(ret, "{}{}", p.x + 1, dan(p.y + 1)).unwrap(),
        // Drop: piece letter followed by '*'.
        None => write!(ret, "{}*", piece_with_color(m.color(), m.piece())).unwrap(),
    }
    write!(ret, "{}{}", m.to().x + 1, dan(m.to().y + 1)).unwrap();
    if m.is_promote() {
        write!(ret, "+").unwrap()
    }
    ret
}
/// Build a USI `position` command: the start position in SFEN followed by
/// the moves played so far.
pub fn position(p: &Position, moves: &[Move]) -> String {
    let mut ret = String::new();
    write!(ret, "position sfen {} moves", sfen(p)).unwrap();
    for &m in moves {
        write!(ret, " {}", enc_move(&m)).unwrap()
    }
    ret
}
#[cfg(test)]
mod tests {
    use types::*;
    // Round-trip checks for the individual SFEN encoders: hand pieces,
    // the initial (hirate) board, move encoding and the `position` command.
    #[test]
    fn it_works() {
        let mut captured = Captured::default();
        assert_eq!(super::captured(&captured), "-");
        captured.add(Color::Black, Piece::Silver);
        captured.add(Color::Black, Piece::Pawn);
        captured.add(Color::Black, Piece::Pawn);
        captured.add(Color::White, Piece::Bishop);
        captured.add(Color::White, Piece::Pawn);
        captured.add(Color::White, Piece::Pawn);
        captured.add(Color::White, Piece::Pawn);
        assert_eq!(super::captured(&captured), "2PS3pb");
        assert_eq!(super::board(&Board::hirate()),
                   "lnsgkgsnl/1r5b1/ppppppppp/9/9/9/PPPPPPPPP/1B5R1/LNSGKGSNL");
        assert_eq!(super::enc_move(&Move::Move {
                       color: Color::Black,
                       from: Point::one_start(7, 7),
                       to: Point::one_start(7, 6),
                       piece: Piece::Pawn,
                       promote: false,
                   }),
                   "7g7f");
        assert_eq!(super::enc_move(&Move::Drop {
                       color: Color::Black,
                       to: Point::one_start(7, 6),
                       piece: Piece::Pawn,
                   }),
                   "P*7f");
        assert_eq!(super::position(&Position::new(Board::hirate(),
                                                  Captured::default(),
                                                  Color::Black),
                                   &vec![Move::Move {
                                             color: Color::Black,
                                             from: Point::one_start(7, 7),
                                             to: Point::one_start(7, 6),
                                             piece: Piece::Pawn,
                                             promote: false,
                                         },
                                         Move::Move {
                                             color: Color::White,
                                             from: Point::one_start(3, 3),
                                             to: Point::one_start(3, 4),
                                             piece: Piece::Pawn,
                                             promote: false,
                                         }]),
                   "position sfen lnsgkgsnl/1r5b1/ppppppppp/9/9/9/PPPPPPPPP/1B5R1/LNSGKGSNL b - 1 \
                    moves 7g7f 3c3d")
    }
    // Applying a move (plain and promoting) must be reflected in the SFEN.
    #[test]
    fn make_move_check() {
        let mut p = Position::hirate();
        p.make_move(&Move::Move {
            color: Color::Black,
            from: Point::one_start(7, 7),
            to: Point::one_start(7, 6),
            piece: Piece::Pawn,
            promote: false,
        })
        .unwrap();
        assert_eq!(super::sfen(&p),
                   "lnsgkgsnl/1r5b1/ppppppppp/9/9/2P6/PP1PPPPPP/1B5R1/LNSGKGSNL w - 1");
        let mut p = Position::hirate();
        let m = Move::Move {
            color: Color::Black,
            from: Point::one_start(7, 7),
            to: Point::one_start(7, 6),
            piece: Piece::PPawn,
            promote: true,
        };
        p.make_move(&m).unwrap();
        assert_eq!(super::sfen(&p),
                   "lnsgkgsnl/1r5b1/ppppppppp/9/9/2+P6/PP1PPPPPP/1B5R1/LNSGKGSNL w - 1");
    }
}
| 30.260274 | 99 | 0.389467 |
eb9794a84575d7fe1564ef6bc35067c3ecf054bc | 3,556 | /*
SPDX-FileCopyrightText: 2022 localthomas
SPDX-License-Identifier: MIT OR Apache-2.0
*/
mod cache;
mod config;
mod drawio;
mod pandoc;
use std::{ffi::OsStr, io::Write, path::Path};
use anyhow::{anyhow, Context, Result};
use cache::{ConverterCache, NoCacheConverter, OutputFormat};
use rayon::iter::{IntoParallelRefMutIterator, ParallelIterator};
use crate::{
config::Config,
drawio::DrawioConverter,
pandoc::{Image, PandocDocument},
};
/// Pandoc JSON filter entry point: reads the document AST from stdin,
/// converts every `*.drawio` image to PDF or SVG (depending on the output
/// format), rewrites the image URLs, and writes the AST back to stdout.
fn main() -> Result<()> {
    // read the CLI arguments and options
    let config = Config::new().context("could not create configuration")?;
    // print credits and exit
    if config.credits {
        let credits = include_str!("../license.html");
        println!("{}", credits);
        return Ok(());
    }
    // prepare the drawio converter
    let drawio = DrawioConverter::new(&config.xvfb_run_cmd, &config.drawio_cmd)
        .context("could not create drawio converter")?;
    let converter = NoCacheConverter::new(&drawio);
    // read the pandoc AST from stdin
    let stdin = std::io::stdin();
    // Note: do not read all of the stdin data into a buffer, as the stdin input of pandoc can be very large:
    // e.g. `fill_buf()` would not work due to an internal size limitation of the buffer.
    let input_data = stdin.lock();
    // convert the AST to a Rust representation
    let mut pandoc = PandocDocument::new(input_data).context("could not create pandoc document")?;
    // get all possible images from the document and filter for *.drawio files
    let mut drawio_images: Vec<Image> = pandoc
        .get_all_images()
        .into_iter()
        .filter(|image| {
            let path_string: String = image.image_url_read_only().clone();
            Path::new(&path_string).extension().and_then(OsStr::to_str) == Some("drawio")
        })
        .collect();
    // convert each image to its output format
    // (rayon parallel iterator: conversions run concurrently; the first
    // error short-circuits via try_for_each)
    drawio_images
        .par_iter_mut()
        .try_for_each(|image| -> Result<()> {
            convert_image(&converter, image, &config.format).context("could not convert image")?;
            Ok(())
        })?;
    // write the document AST back as JSON to stdout
    std::io::stdout()
        .write_all(
            &pandoc
                .to_json()
                .context("could not get JSON format of pandoc document")?,
        )
        .context("could not write the pandoc document to stdout")?;
    Ok(())
}
/// Converts the image to a suitable output format depending on the document
/// output format (PDF for print targets, SVG for HTML targets).
/// On success, `image` is altered to reference the newly converted file.
fn convert_image(converter: &dyn ConverterCache, image: &mut Image, format: &str) -> Result<()> {
    // Decide the target format first so unsupported outputs fail fast.
    let output_format = match format {
        "pdf" | "latex" | "context" => OutputFormat::Pdf,
        "html" | "html5" | "html4" => OutputFormat::Svg,
        _ => return Err(anyhow!("unknown or unsupported format: {}", format)),
    };
    let source_url: String = image.image_url_read_only().clone();
    let output_path = converter
        .convert(Path::new(&source_url), output_format)
        .context("could not convert to output format")?;
    // Rewrite the pandoc image URL so the document points at the converted file.
    let new_url = output_path
        .to_str()
        .ok_or_else(|| {
            anyhow!(
                "the output path for a converted file is not valid utf8: {:?}",
                output_path
            )
        })?
        .to_string();
    *image.image_url() = new_url;
    Ok(())
}
| 33.233645 | 109 | 0.625984 |
d59ce86cf21ed2c2670b53a1a423a82355fcc1ff | 8,366 | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use futures::{FutureExt, StreamExt};
use rand::Rng;
/// One CLI invocation plus the exact stdout/stderr it must produce.
struct Command<'a> {
    args: Vec<&'a str>,
    expected_stdout: &'a str,
    expected_stderr: &'a str,
}
/// Launch a fresh dhcpd environment and run each `Command` through the
/// dhcpd-cli component, asserting on its exact output.
#[cfg(test)]
async fn test_cli(commands: Vec<Command<'_>>) {
    let mut fs = fuchsia_component::server::ServiceFs::new_local();
    //TODO(atait): Why can't two component proxies establish a connection with one another? It would be
    // preferable to have both fuchsia.net.dhcp.Server and fuchsia.stash.Store added as component
    // proxies, ensuring new component instances per test case for both services. However, when both
    // are added as component proxies, dhcpd return ZX_ERR_PEER_CLOSED when it connects to stash. As
    // a work around, a random stash identifier is generated per test case, ensuring that one test
    // case does not pollute another.
    fs.add_proxy_service::<fidl_fuchsia_stash::StoreMarker, _>()
        .add_component_proxy_service::<fidl_fuchsia_net_dhcp::Server_Marker, _>(
            fuchsia_component::fuchsia_single_component_package_url!("dhcpd").to_string(),
            Some(vec![
                "--stash".to_string(),
                rand::thread_rng().sample_iter(&rand::distributions::Alphanumeric).take(8).collect(),
            ]),
        );
    let env =
        fs.create_salted_nested_environment("test_cli").expect("failed to create environment");
    // Drive the service fs concurrently with the CLI invocations below.
    let fs = fs.for_each_concurrent(None, |()| async move {});
    futures::pin_mut!(fs);
    for Command { args, expected_stdout, expected_stderr } in commands {
        let output = fuchsia_component::client::AppBuilder::new(
            fuchsia_component::fuchsia_single_component_package_url!("dhcpd-cli"),
        )
        .args(args)
        .output(env.launcher())
        .expect("failed to launch dhcpd-cli");
        // The service fs future never completes; if it does, the test is broken.
        let output = futures::select! {
            () = fs => panic!("request stream terminated"),
            output = output.fuse() => output.expect("dhcpd-cli terminated with error"),
        };
        let stdout = std::str::from_utf8(&output.stdout).expect("failed to get stdout");
        let stderr = std::str::from_utf8(&output.stderr).expect("failed to get stderr");
        assert_eq!(stderr, expected_stderr);
        assert_eq!(stdout, expected_stdout);
    }
}
// Getting an option that was never set must fail with NOT_FOUND.
#[fuchsia_async::run_singlethreaded(test)]
async fn test_get_option_subnet() {
    test_cli(vec![Command {
        args: vec!["get", "option", "subnet-mask"],
        expected_stdout: "",
        expected_stderr: r#"Error: get_option(SubnetMask(SubnetMask { mask: None })) failed
Caused by:
    NOT_FOUND
"#,
    }])
    .await
}
// Parameters have server defaults, so getting one without setting succeeds.
#[fuchsia_async::run_singlethreaded(test)]
async fn test_get_parameter_lease() {
    test_cli(vec![Command {
        args: vec!["get", "parameter", "lease-length"],
        expected_stdout: r#"Lease(
    LeaseLength {
        default: Some(
            86400,
        ),
        max: Some(
            86400,
        ),
    },
)
"#,
        expected_stderr: "",
    }])
    .await
}
// Setting an option succeeds silently.
#[fuchsia_async::run_singlethreaded(test)]
async fn test_set_option_subnet() {
    test_cli(vec![Command {
        args: vec!["set", "option", "subnet-mask", "--mask", "255.255.255.0"],
        expected_stdout: "",
        expected_stderr: "",
    }])
    .await
}
// Setting a parameter succeeds silently.
#[fuchsia_async::run_singlethreaded(test)]
async fn test_set_parameter_lease() {
    test_cli(vec![Command {
        args: vec!["set", "parameter", "lease-length", "--default", "42"],
        expected_stdout: "",
        expected_stderr: "",
    }])
    .await
}
// After setting an option, `list option` reports it.
#[fuchsia_async::run_singlethreaded(test)]
async fn test_list_option() {
    test_cli(vec![
        Command {
            args: vec!["set", "option", "subnet-mask", "--mask", "255.255.255.0"],
            expected_stdout: "",
            expected_stderr: "",
        },
        Command {
            args: vec!["list", "option"],
            expected_stdout: r#"[
    SubnetMask(
        Ipv4Address {
            addr: [
                255,
                255,
                255,
                0,
            ],
        },
    ),
]
"#,
            expected_stderr: "",
        },
    ])
    .await
}
// `list parameter` reports the server defaults for every supported parameter.
#[fuchsia_async::run_singlethreaded(test)]
async fn test_list_parameter() {
    test_cli(vec![Command {
        args: vec!["list", "parameter"],
        expected_stdout: r#"[
    IpAddrs(
        [
            Ipv4Address {
                addr: [
                    192,
                    168,
                    0,
                    1,
                ],
            },
        ],
    ),
    AddressPool(
        AddressPool {
            network_id: Some(
                Ipv4Address {
                    addr: [
                        192,
                        168,
                        0,
                        0,
                    ],
                },
            ),
            broadcast: Some(
                Ipv4Address {
                    addr: [
                        192,
                        168,
                        0,
                        128,
                    ],
                },
            ),
            mask: Some(
                Ipv4Address {
                    addr: [
                        255,
                        255,
                        255,
                        128,
                    ],
                },
            ),
            pool_range_start: Some(
                Ipv4Address {
                    addr: [
                        192,
                        168,
                        0,
                        0,
                    ],
                },
            ),
            pool_range_stop: Some(
                Ipv4Address {
                    addr: [
                        192,
                        168,
                        0,
                        0,
                    ],
                },
            ),
        },
    ),
    Lease(
        LeaseLength {
            default: Some(
                86400,
            ),
            max: Some(
                86400,
            ),
        },
    ),
    PermittedMacs(
        [],
    ),
    StaticallyAssignedAddrs(
        [],
    ),
    ArpProbe(
        false,
    ),
    BoundDeviceNames(
        [],
    ),
]
"#,
        expected_stderr: "",
    }])
    .await
}
// `reset option` clears previously set options back to an empty list.
#[fuchsia_async::run_singlethreaded(test)]
async fn test_reset_option() {
    test_cli(vec![
        Command {
            args: vec!["set", "option", "subnet-mask", "--mask", "255.255.255.0"],
            expected_stdout: "",
            expected_stderr: "",
        },
        Command {
            args: vec!["list", "option"],
            expected_stdout: r#"[
    SubnetMask(
        Ipv4Address {
            addr: [
                255,
                255,
                255,
                0,
            ],
        },
    ),
]
"#,
            expected_stderr: "",
        },
        Command { args: vec!["reset", "option"], expected_stdout: "", expected_stderr: "" },
        Command { args: vec!["list", "option"], expected_stdout: "[]\n", expected_stderr: "" },
    ])
    .await
}
// `reset parameter` restores parameters to their server defaults.
#[fuchsia_async::run_singlethreaded(test)]
async fn test_reset_parameter() {
    test_cli(vec![
        Command {
            args: vec!["set", "parameter", "lease-length", "--default", "42"],
            expected_stdout: "",
            expected_stderr: "",
        },
        Command {
            args: vec!["get", "parameter", "lease-length"],
            expected_stdout: r#"Lease(
    LeaseLength {
        default: Some(
            42,
        ),
        max: Some(
            42,
        ),
    },
)
"#,
            expected_stderr: "",
        },
        Command { args: vec!["reset", "parameter"], expected_stdout: "", expected_stderr: "" },
        Command {
            args: vec!["get", "parameter", "lease-length"],
            expected_stdout: r#"Lease(
    LeaseLength {
        default: Some(
            86400,
        ),
        max: Some(
            86400,
        ),
    },
)
"#,
            expected_stderr: "",
        },
    ])
    .await
}
// `clear-leases` succeeds silently even when no leases exist.
#[fuchsia_async::run_singlethreaded(test)]
async fn test_clear_leases() {
    test_cli(vec![Command { args: vec!["clear-leases"], expected_stdout: "", expected_stderr: "" }])
        .await
}
| 26.391167 | 103 | 0.476094 |
8f4f17e1d1ba539f6453419bada7a0aa1a3a3e3c | 2,426 | //! # 82. 删除排序链表中的重复元素 II
//!
//! 难度 中等
//!
//! 给定一个排序链表,删除所有含有重复数字的节点,只保留原始链表中 没有重复出现 的数字。
//!
//! ## 示例 1:
//!
//! ```text
//! 输入: 1->2->3->3->4->4->5
//! 输出: 1->2->5
//! ```
//!
//! ## 示例 2:
//!
//! ```text
//! 输入: 1->1->1->2->3
//! 输出: 2->3
//! ```
//!
//! See [leetcode](https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list-ii/)
//!
use crate::ListNode;
/// Empty marker type carrying the solution methods (LeetCode convention).
pub struct Solution;
impl Solution {
    /// Recursive solution working on `Option<&mut>` probes of the head.
    ///
    /// Invariant of the sorted input: equal values are adjacent, so a node
    /// whose value equals its successor's marks the start of a run that must
    /// be dropped entirely.
    pub fn delete_duplicates(mut head: Option<Box<ListNode>>) -> Option<Box<ListNode>> {
        // Zero or one node: nothing can be duplicated.
        if head.is_none() || head.as_ref().unwrap().next.is_none() {
            return head;
        }
        if head.as_ref().unwrap().val == head.as_ref().unwrap().next.as_ref().unwrap().val {
            // Skip forward while the successor repeats the head's value…
            while head.is_some() && head.as_mut().unwrap().next.is_some() && head.as_mut().unwrap().next.as_mut().unwrap().val == head.as_mut().unwrap().val {
                head = head.as_mut().unwrap().next.take();
            }
            // …then drop the last node of the run too and recurse on the rest.
            return Solution::delete_duplicates(head.and_then(|mut head| head.next.take()));
        } else {
            // Head is unique: keep it and recurse on the tail.
            head.as_mut().unwrap().next = Solution::delete_duplicates(head.as_mut().unwrap().next.take());
            head
        }
    }
    /// Same algorithm expressed with `and_then` on the unboxed head instead
    /// of `unwrap` chains.
    pub fn delete_duplicates_2(head: Option<Box<ListNode>>) -> Option<Box<ListNode>> {
        head.and_then(|mut head| {
            let mut next = head.next.take();
            if next.is_none() {
                return head.into();
            }
            if next.as_ref().unwrap().val == head.val {
                // Consume the whole run of nodes equal to `head.val`.
                while next.is_some() && head.val == next.as_ref().unwrap().val {
                    head = next.unwrap();
                    next = head.next.take();
                }
                Solution::delete_duplicates_2(next)
            } else {
                head.next = Solution::delete_duplicates_2(next);
                head.into()
            }
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::list;
    // Both implementations must agree with the problem's examples
    // (plus the empty-list edge case).
    #[test]
    fn test() {
        let t = |t| ListNode::into_vec(Solution::delete_duplicates(t));
        let t2 = |t| ListNode::into_vec(Solution::delete_duplicates_2(t));
        let cases = vec![
            (vec![1,2,5], list![1,2,3,3,4,4,5]),
            (vec![2,3], list![1,1,1,2,3]),
            (vec![], list![]),
        ];
        for (expect, input) in cases {
            assert_eq!(expect, t(input.clone()));
            assert_eq!(expect, t2(input));
        }
    }
}
| 27.885057 | 158 | 0.501237 |
16403a949c46c5e85452595e590bef45850f1ef1 | 2,504 | use crate::boundaries;
use crate::boundaries::{
DeaneryCollectionQueryResponse, DeaneryDbGateway, DeaneryDbResponse, DeaneryQueryRequest,
DeaneryQueryResponse,
};
use crate::entity::deanery::Deanery;
use async_trait::async_trait;
/// Use-case interactor for deanery queries, generic over the persistence
/// gateway so storage can be swapped (e.g. mocked in tests).
pub struct DeaneryQueryInteractor<A: DeaneryDbGateway> {
    // Gateway to the deanery persistence layer.
    db_gateway: A,
}
#[async_trait]
impl<A> boundaries::DeaneryQueryInputBoundary for DeaneryQueryInteractor<A>
where
    A: DeaneryDbGateway + Sync + Send,
{
    /// Look up a single deanery by id; `None` when it does not exist.
    async fn get_deanery(&self, request: DeaneryQueryRequest) -> Option<DeaneryQueryResponse> {
        // The id is Copy (it is unwrapped twice in the original flow), so it
        // can be hoisted once up front.
        let id = request.id.unwrap();
        println!("deanery query input boundary {}", id.to_hyphenated());
        match self.db_gateway.find_by_id(id).await {
            Some(db_response) => {
                println!("deanery found");
                Some(DeaneryQueryResponse {
                    deanery: db_response.to_deanery_entity(),
                })
            }
            None => {
                println!("deanery not found");
                None
            }
        }
    }
    /// Page through deaneries matching the optional name/diocese filters.
    async fn get_deaneries(&self, request: DeaneryQueryRequest) -> DeaneryCollectionQueryResponse {
        println!("deanery query input boundary");
        let result = self
            .db_gateway
            .get_deanery_collection(request.name, request.diocese_id, request.offset, request.count)
            .await;
        DeaneryCollectionQueryResponse {
            collection: result
                .collection
                .into_iter()
                .map(|db_response| db_response.to_deanery_entity())
                .collect(),
            has_more: result.has_more,
            total: result.total,
        }
    }
}
impl<A> DeaneryQueryInteractor<A>
where
    A: DeaneryDbGateway + Sync + Send,
{
    /// Build an interactor around the given persistence gateway.
    pub fn new(db_gateway: A) -> Self {
        Self { db_gateway }
    }
}
impl DeaneryDbResponse {
    /// Convert this persistence-layer record into a domain `Deanery` entity.
    pub fn to_deanery_entity(&self) -> Deanery {
        Deanery {
            diocese: self.diocese.to_diocese_entity(),
            id: self.id.clone(),
            name: self.name.clone(),
            location_email: self.location_email.clone(),
            location_address: self.location_address.clone(),
            location_name: self.location_name.clone(),
            person_in_charge: self.person_in_charge.clone(),
        }
    }
}
| 30.91358 | 99 | 0.620208 |
48f0fb4115715e96f011d5b860f1584f2e6b7c34 | 13,757 | use crate::ast::*;
use crate::error::TransformationError;
use crate::transformations::*;
use crate::util;
use std::usize;
/// Settings for general transformations.
pub struct GeneralSettings {}
/// Moves flat headings into a hierarchical structure based on their depth.
pub fn fold_headings_transformation(mut root: Element, settings: &GeneralSettings) -> TResult {
    // append following deeper headings than current_depth in content to the result list.
    fn move_deeper_headings<'a>(
        trans: &TFuncInplace<&'a GeneralSettings>,
        root_content: &mut Vec<Element>,
        settings: &'a GeneralSettings,
    ) -> TListResult {
        let mut result = vec![];
        // Index (into `result`) of the heading deeper ones get nested under.
        let mut current_heading_index = 0;
        // current maximum depth level, every deeper heading will be moved
        let mut current_depth = usize::MAX;
        for child in root_content.drain(..) {
            if let Element::Heading(cur_heading) = child {
                if cur_heading.depth > current_depth {
                    // Deeper than the reference heading: nest under it.
                    let last = result.get_mut(current_heading_index);
                    if let Some(&mut Element::Heading(ref mut e)) = last {
                        e.content.push(Element::Heading(cur_heading));
                    }
                } else {
                    // pick a new reference heading if the new one
                    // is equally deep or more shallow
                    current_heading_index = result.len();
                    current_depth = cur_heading.depth;
                    result.push(Element::Heading(cur_heading));
                }
            } else {
                // Non-heading content is only legal before the first heading
                // (current_depth is still at its usize::MAX sentinel).
                if current_depth < usize::MAX {
                    return Err(TransformationError {
                        cause: "a non-heading element was found after a heading. \
                                This should not happen."
                            .to_string(),
                        position: child.get_position().clone(),
                        transformation_name: String::from("fold_headings_transformation"),
                        tree: child.clone(),
                    });
                }
                result.push(child);
            }
        }
        // recurse transformation
        result = apply_func_drain(trans, &mut result, settings)?;
        Ok(result)
    };
    root = recurse_inplace_template(
        &fold_headings_transformation,
        root,
        settings,
        &move_deeper_headings,
    )?;
    Ok(root)
}
/// Moves list items of higher depth into separate sub-lists.
/// If a list is started with a deeper item than one, this transformation still applies,
/// although this should later be a linter error.
pub fn fold_lists_transformation(mut root: Element, settings: &GeneralSettings) -> TResult {
    // move list items which are deeper than the current level into new sub-lists.
    fn move_deeper_items<'a>(
        trans: &TFuncInplace<&'a GeneralSettings>,
        root_content: &mut Vec<Element>,
        settings: &'a GeneralSettings,
    ) -> TListResult {
        // the currently least deep list item, every deeper
        // list item will be moved to a new sublist
        let mut lowest_depth = usize::MAX;
        // First pass: find the minimum depth and reject non-list-item children.
        for child in &root_content[..] {
            if let Element::ListItem(ref e) = *child {
                if e.depth < lowest_depth {
                    lowest_depth = e.depth;
                }
            } else {
                return Err(TransformationError {
                    cause: String::from("A list should not contain non-listitems."),
                    transformation_name: String::from("fold_lists_transformation"),
                    position: child.get_position().clone(),
                    tree: child.clone(),
                });
            }
        }
        let mut result = vec![];
        // create a new sublist when encountering a lower item
        let mut create_sublist = true;
        // Second pass: keep items at `lowest_depth`, push deeper runs into a
        // nested List attached to the preceding kept item.
        for child in root_content.drain(..) {
            if let Element::ListItem(cur_item) = child {
                if cur_item.depth > lowest_depth {
                    // this error is returned if the sublist to append to was not found
                    let build_found_error = |origin: &ListItem| TransformationError {
                        cause: "sublist was not instantiated properly.".into(),
                        transformation_name: "fold_lists_transformation".into(),
                        position: origin.position.clone(),
                        tree: Element::ListItem(origin.clone()),
                    };
                    if create_sublist {
                        // create a new sublist
                        create_sublist = false;
                        // A deeper item with no preceding shallow item gets a
                        // synthetic parent item at lowest_depth.
                        if result.is_empty() {
                            result.push(Element::ListItem(ListItem {
                                position: cur_item.position.clone(),
                                depth: lowest_depth,
                                kind: cur_item.kind,
                                content: vec![],
                            }));
                        }
                        if let Some(&mut Element::ListItem(ref mut last)) = result.last_mut() {
                            last.content.push(Element::List(List {
                                position: cur_item.position.clone(),
                                content: vec![],
                            }));
                        } else {
                            return Err(build_found_error(&cur_item));
                        }
                    }
                    // Append the deep item to the sublist created above.
                    if let Some(&mut Element::ListItem(ref mut item)) = result.last_mut() {
                        if let Some(&mut Element::List(ref mut l)) = item.content.last_mut() {
                            l.content.push(Element::ListItem(cur_item));
                        } else {
                            return Err(build_found_error(&cur_item));
                        }
                    } else {
                        return Err(build_found_error(&cur_item));
                    }
                } else {
                    result.push(Element::ListItem(cur_item));
                    create_sublist = true;
                }
            } else {
                result.push(child);
            };
        }
        result = apply_func_drain(trans, &mut result, settings)?;
        Ok(result)
    };
    // Only fold inside lists; elsewhere just recurse into children.
    if let Element::List { .. } = root {
        root = recurse_inplace_template(
            &fold_lists_transformation,
            root,
            settings,
            &move_deeper_items,
        )?;
    } else {
        root = recurse_inplace(&fold_lists_transformation, root, settings)?;
    };
    Ok(root)
}
/// Transform whitespace-only paragraphs to empty paragraphs.
///
/// A paragraph is considered blank when every child is a text element whose
/// content is pure whitespace; its children are then removed so later passes
/// (e.g. `collapse_paragraphs`) can treat it as a separator.
pub fn whitespace_paragraphs_to_empty(mut root: Element, settings: &GeneralSettings) -> TResult {
    if let Element::Paragraph(ref mut par) = root {
        // Iterator form of the previous manual flag-and-break loop; an empty
        // paragraph is trivially "all whitespace" and stays empty, exactly as
        // before.
        let is_only_whitespace = par.content.iter().all(|child| match *child {
            Element::Text(ref text) => util::is_whitespace(&text.text),
            _ => false,
        });
        if is_only_whitespace {
            // `clear` replaces `drain(..)` whose iterator was dropped unused.
            par.content.clear();
        }
    }
    recurse_inplace(&whitespace_paragraphs_to_empty, root, settings)
}
/// Reduce consecutive paragraphs and absorb trailing text into one,
/// if not separated by a blank paragraph.
pub fn collapse_paragraphs(
    mut root: Element,
    settings: &GeneralSettings,
) -> Result<Element, TransformationError> {
    // Merge each non-empty paragraph into its predecessor unless an empty
    // paragraph was seen in between (which acts as a hard separator).
    fn squash_empty_paragraphs<'a>(
        trans: &TFuncInplace<&'a GeneralSettings>,
        root_content: &mut Vec<Element>,
        settings: &'a GeneralSettings,
    ) -> TListResult {
        let mut result = vec![];
        // True while the most recently seen paragraph was empty; empty
        // paragraphs themselves are dropped from the output.
        let mut last_empty = false;
        for mut child in root_content.drain(..) {
            if let Element::Paragraph(ref mut par) = child {
                if par.content.is_empty() {
                    last_empty = true;
                    continue;
                }
                // if the last paragraph was not empty, append to it.
                if !last_empty {
                    if let Some(&mut Element::Paragraph(ref mut last)) = result.last_mut() {
                        // Add a space on line break
                        last.content.push(Element::Text(Text {
                            text: " ".into(),
                            position: last.position.clone(),
                        }));
                        last.content.append(&mut par.content);
                        // Extend the merged paragraph's span to cover both.
                        last.position.end = par.position.end.clone();
                        continue;
                    }
                }
            };
            result.push(child);
            last_empty = false;
        }
        result = apply_func_drain(trans, &mut result, settings)?;
        Ok(result)
    }
    root = recurse_inplace_template(
        &collapse_paragraphs,
        root,
        settings,
        &squash_empty_paragraphs,
    )?;
    Ok(root)
}
/// Collapse consecutive text tags into one, removing duplicate whitespace.
pub fn collapse_consecutive_text(
    mut root: Element,
    settings: &GeneralSettings,
) -> Result<Element, TransformationError> {
    // Append each text element to an immediately preceding text element;
    // whitespace-only text collapses to a single space.
    fn squash_text<'a>(
        trans: &TFuncInplace<&'a GeneralSettings>,
        root_content: &mut Vec<Element>,
        settings: &'a GeneralSettings,
    ) -> TListResult {
        let mut result = vec![];
        for mut child in root_content.drain(..) {
            if let Element::Text(ref mut text) = child {
                if let Some(&mut Element::Text(ref mut last)) = result.last_mut() {
                    if util::is_whitespace(&text.text) {
                        last.text.push(' ');
                    } else {
                        last.text.push_str(&text.text);
                    }
                    last.position.end = text.position.end.clone();
                    continue;
                }
            };
            result.push(child);
        }
        result = apply_func_drain(trans, &mut result, settings)?;
        Ok(result)
    }
    root = recurse_inplace_template(&collapse_consecutive_text, root, settings, &squash_text)?;
    Ok(root)
}
/// Enumerate anonymous template arguments as "1", "2", ...
pub fn enumerate_anon_args(mut root: Element, settings: &GeneralSettings) -> TResult {
    if let Element::Template(ref mut template) = root {
        // Unnamed (whitespace-named) arguments are numbered by position.
        let mut next_number = 1;
        for child in &mut template.content {
            if let Element::TemplateArgument(ref mut arg) = *child {
                if arg.name.trim().is_empty() {
                    arg.name = next_number.to_string();
                    next_number += 1;
                }
            }
        }
    }
    recurse_inplace(&enumerate_anon_args, root, settings)
}
// taken from https://github.com/portstrom/parse_wiki_text/blob/master/src/default.rs
/// URI schemes accepted as targets of external references; anything not
/// starting with one of these is turned back into plain text by
/// `validate_external_refs`.
const PROTOCOLS: [&str; 28] = [
    "//",
    "bitcoin:",
    "ftp://",
    "ftps://",
    "geo:",
    "git://",
    "gopher://",
    "http://",
    "https://",
    "irc://",
    "ircs://",
    "magnet:",
    "mailto:",
    "mms://",
    "news:",
    "nntp://",
    "redis://",
    "sftp://",
    "sip:",
    "sips:",
    "sms:",
    "ssh://",
    "svn://",
    "tel:",
    "telnet://",
    "urn:",
    "worldwind://",
    "xmpp:",
];
/// only keep external references with actual urls
pub fn validate_external_refs(mut root: Element, settings: &GeneralSettings) -> TResult {
    // Keep references whose target starts with a known scheme; degrade the
    // rest into literal text: "[" + target, the caption elements, "]".
    fn validate_erefs_vec<'a>(
        trans: &TFuncInplace<&'a GeneralSettings>,
        root_content: &mut Vec<Element>,
        settings: &'a GeneralSettings,
    ) -> TListResult {
        let mut result = vec![];
        for mut child in root_content.drain(..) {
            if let Element::ExternalReference(ref mut eref) = child {
                let is_uri = PROTOCOLS.iter().any(|p| eref.target.trim().starts_with(p));
                if is_uri {
                    eref.target = eref.target.trim().to_string();
                    result.push(child);
                } else {
                    // Opening text spans from the reference start to the
                    // first caption element (or the reference end if there
                    // is no caption).
                    result.push(Element::Text(Text {
                        position: Span {
                            start: eref.position.start.clone(),
                            end: eref
                                .caption
                                .iter()
                                .next()
                                .map(|c| c.get_position().start.clone())
                                .unwrap_or(eref.position.end.clone()),
                        },
                        text: format!("[{}", eref.target),
                    }));
                    result.append(&mut eref.caption);
                    result.push(Element::Text(Text {
                        position: Span {
                            // The closing "]" occupies the last column of the
                            // reference's span.
                            start: {
                                let mut s = eref.position.end.clone();
                                s.col -= 1;
                                s.offset -= 1;
                                s
                            },
                            end: eref.position.end.clone(),
                        },
                        text: "]".to_string(),
                    }));
                }
            } else {
                result.push(child);
            }
        }
        result = apply_func_drain(trans, &mut result, settings)?;
        Ok(result)
    }
    root = recurse_inplace_template(&validate_external_refs, root, settings, &validate_erefs_vec)?;
    Ok(root)
}
| 36.882038 | 99 | 0.492622 |
7191b5138f7540e6ee409b7c7a497cb5fc4bcb0b | 2,279 | use crate::{
LodestoneScraper,
error::*,
util::{Either, AsLodestone},
};
use ffxiv_types::{World, DataCenter};
use lodestone_parser::models::{
GrandCompany,
search::{
Paginated,
free_company::FreeCompanySearchItem,
},
};
use url::Url;
#[derive(Debug)]
pub struct FreeCompanySearchBuilder<'a> {
scraper: &'a LodestoneScraper,
// q
name: Option<&'a str>,
// worldname
world: Option<Either<World, DataCenter>>,
// gcid
grand_company: Option<Vec<GrandCompany>>,
// page
page: Option<u64>,
}
impl<'a> FreeCompanySearchBuilder<'a> {
  /// Creates an empty builder bound to `scraper`.
  pub fn new(scraper: &'a LodestoneScraper) -> Self {
    FreeCompanySearchBuilder {
      scraper,
      name: None,
      world: None,
      grand_company: None,
      page: None,
    }
  }

  /// Filters results by free company name (`q` parameter).
  pub fn name(&mut self, n: &'a str) -> &mut Self {
    self.name = Some(n);
    self
  }

  /// Restricts the search to a single world; overwrites any previously set
  /// world or data center.
  pub fn world(&mut self, w: World) -> &mut Self {
    self.world = Some(Either::Left(w));
    self
  }

  /// Restricts the search to a data center; overwrites any previously set
  /// world or data center.
  pub fn data_center(&mut self, dc: DataCenter) -> &mut Self {
    self.world = Some(Either::Right(dc));
    self
  }

  /// Adds a grand company filter; may be called multiple times to allow
  /// several grand companies.
  pub fn grand_company(&mut self, gc: GrandCompany) -> &mut Self {
    self.grand_company.get_or_insert_with(Default::default).push(gc);
    self
  }

  /// Requests a specific page of results.
  pub fn page(&mut self, p: u64) -> &mut Self {
    self.page = Some(p);
    self
  }

  /// Executes the search and parses the resulting HTML into a paginated
  /// list of matches.
  pub async fn send(&self) -> Result<Paginated<FreeCompanySearchItem>> {
    let text = self.scraper.text(self.as_url()).await?;
    lodestone_parser::parse_free_company_search(&text).map_err(Error::Parse)
  }

  /// Builds the Lodestone search URL from the currently-set filters.
  pub fn as_url(&self) -> Url {
    let mut url = crate::LODESTONE_URL.join("freecompany/").unwrap();
    {
      // `query_pairs_mut` borrows `url`; the inner scope ends that borrow
      // before `url` is returned.
      let mut pairs = url.query_pairs_mut();
      if let Some(page) = self.page {
        pairs.append_pair("page", &page.to_string());
      }
      if let Some(ref name) = self.name {
        pairs.append_pair("q", name);
      }
      match self.world {
        Some(Either::Left(w)) => { pairs.append_pair("worldname", w.as_str()); },
        Some(Either::Right(dc)) => { pairs.append_pair("worldname", &dc.as_lodestone()); },
        _ => {},
      }
      if let Some(ref gcs) = self.grand_company {
        for gc in gcs {
          pairs.append_pair("gcid", &gc.as_lodestone().to_string());
        }
      }
    }
    url
  }
}
| 22.126214 | 91 | 0.600702 |
082a430e0ede1714345fc441b39362e8e19081ab | 1,967 | use super::*;
use crate::ast;
use crate::source_map;
use crate::with_default_globals;
use syntax_pos;
/// Pretty-prints a function signature (header, name, generics — no body)
/// with inherited visibility, returning the rendered string.
fn fun_to_string(
    decl: &ast::FnDecl, header: ast::FnHeader, name: ast::Ident, generics: &ast::Generics
) -> String {
    to_string(|s| {
        s.head("");
        s.print_fn(decl, header, Some(name),
                   generics, &source_map::dummy_spanned(ast::VisibilityKind::Inherited));
        s.end(); // Close the head box
        s.end(); // Close the outer box
    })
}
/// Pretty-prints a single enum variant, returning the rendered string.
fn variant_to_string(var: &ast::Variant) -> String {
    to_string(|s| s.print_variant(var))
}
// Checks that a minimal, non-const, non-async, Rust-ABI function with no
// arguments and a default return type renders exactly as `fn abba()`.
#[test]
fn test_fun_to_string() {
    with_default_globals(|| {
        let abba_ident = ast::Ident::from_str("abba");
        let decl = ast::FnDecl {
            inputs: Vec::new(),
            output: ast::FunctionRetTy::Default(syntax_pos::DUMMY_SP),
            c_variadic: false
        };
        let generics = ast::Generics::default();
        assert_eq!(
            fun_to_string(
                &decl,
                ast::FnHeader {
                    unsafety: ast::Unsafety::Normal,
                    constness: source_map::dummy_spanned(ast::Constness::NotConst),
                    asyncness: source_map::dummy_spanned(ast::IsAsync::NotAsync),
                    abi: Abi::Rust,
                },
                abba_ident,
                &generics
            ),
            "fn abba()"
        );
    })
}
// Checks that a unit enum variant renders as just its identifier.
#[test]
fn test_variant_to_string() {
    with_default_globals(|| {
        let ident = ast::Ident::from_str("principal_skinner");
        let var = source_map::respan(syntax_pos::DUMMY_SP, ast::Variant_ {
            ident,
            attrs: Vec::new(),
            id: ast::DUMMY_NODE_ID,
            // making this up as I go.... ?
            data: ast::VariantData::Unit(ast::DUMMY_NODE_ID),
            disr_expr: None,
        });
        let varstr = variant_to_string(&var);
        assert_eq!(varstr, "principal_skinner");
    })
}
| 28.1 | 89 | 0.538892 |
5bae996709fcec8bead6e9c25920125c57e22e4f | 9,384 | use alvr_common::prelude::*;
use alvr_events::EventType;
use alvr_session::{ClientConnectionDesc, LinuxAudioBackend, SessionDesc};
use alvr_sockets::{AudioDevicesList, ClientListAction, GpuVendor, PathSegment};
use cpal::traits::{DeviceTrait, HostTrait};
use serde_json as json;
use std::{
collections::{hash_map::Entry, HashSet},
fs,
ops::{Deref, DerefMut},
path::{Path, PathBuf},
};
use tokio::sync::Notify;
use wgpu::Adapter;
/// Serializes `session` as pretty-printed JSON and writes it to `path`.
fn save_session(session: &SessionDesc, path: &Path) -> StrResult {
    let json_text = json::to_string_pretty(session).map_err(err!())?;
    fs::write(path, json_text).map_err(err!())
}
// SessionDesc wrapper that saves settings.json and session.json on destruction.
/// RAII guard for mutating the session: dereferences to `SessionDesc`, and
/// on drop persists the session to disk and broadcasts the updated session
/// to event listeners.
pub struct SessionLock<'a> {
    session_desc: &'a mut SessionDesc,
    session_path: &'a Path,
}
impl Deref for SessionLock<'_> {
    type Target = SessionDesc;
    fn deref(&self) -> &SessionDesc {
        self.session_desc
    }
}
impl DerefMut for SessionLock<'_> {
    fn deref_mut(&mut self) -> &mut SessionDesc {
        self.session_desc
    }
}
impl Drop for SessionLock<'_> {
    // Persist and notify unconditionally — the guard cannot tell whether the
    // borrower actually changed anything.
    fn drop(&mut self) {
        save_session(self.session_desc, self.session_path).unwrap();
        alvr_events::send_event(EventType::SessionUpdated); // deprecated
        alvr_events::send_event(EventType::Session(Box::new(self.session_desc.clone())));
    }
}
// Correct usage:
// SessionManager should be used behind a Mutex. Each write of the session should be preceded by a
// read, within the same lock.
// fixme: the dashboard is doing this wrong because it is holding its own session state. If read and
// write need to happen on separate threads, a critical region should be implemented.
pub struct ServerDataManager {
    // In-memory copy of session.json; persisted via save_session
    session: SessionDesc,
    // Location of session.json on disk
    session_path: PathBuf,
    // Engine used by execute_script (scope rebuilt per invocation)
    script_engine: rhai::Engine,
    // Vulkan adapters enumerated once at startup
    gpu_adapters: Vec<Adapter>,
}
impl ServerDataManager {
    /// Loads the session from `session_path`, creating the config directory
    /// if needed.
    ///
    /// If the stored JSON no longer matches the current `SessionDesc` schema,
    /// the old file is preserved as `session_old.json` and a new session is
    /// extrapolated field-by-field from the old JSON. Also enumerates Vulkan
    /// adapters and constructs the rhai scripting engine.
    pub fn new(session_path: &Path) -> Self {
        let config_dir = session_path.parent().unwrap();
        fs::create_dir_all(config_dir).ok();
        let session_desc = match fs::read_to_string(&session_path) {
            Ok(session_string) => {
                let json_value = json::from_str::<json::Value>(&session_string).unwrap();
                match json::from_value(json_value.clone()) {
                    Ok(session_desc) => session_desc,
                    Err(_) => {
                        // Schema mismatch: keep a backup, then salvage what we can
                        fs::write(config_dir.join("session_old.json"), &session_string).ok();
                        let mut session_desc = SessionDesc::default();
                        match session_desc.merge_from_json(&json_value) {
                            Ok(_) => info!(
                                "{} {}",
                                "Session extrapolated successfully.",
                                "Old session.json is stored as session_old.json"
                            ),
                            Err(e) => error!(
                                "{} {} {}",
                                "Error while extrapolating session.",
                                "Old session.json is stored as session_old.json.",
                                e
                            ),
                        }
                        // not essential, but useful to avoid duplicated errors
                        save_session(&session_desc, session_path).ok();
                        session_desc
                    }
                }
            }
            Err(_) => SessionDesc::default(),
        };
        let gpu_adapters = {
            let instance = wgpu::Instance::new(wgpu::Backends::VULKAN);
            instance
                .enumerate_adapters(wgpu::Backends::VULKAN)
                .collect()
        };
        let script_engine = rhai::Engine::new();
        Self {
            session: session_desc,
            session_path: session_path.to_owned(),
            script_engine,
            gpu_adapters,
        }
    }

    /// Read-only access to the current session.
    pub fn session(&self) -> &SessionDesc {
        &self.session
    }

    /// Mutable access to the session; when the returned guard drops, the
    /// session is saved to disk and broadcast to event listeners.
    pub fn session_mut(&mut self) -> SessionLock {
        SessionLock {
            session_desc: &mut self.session,
            session_path: &self.session_path,
        }
    }

    /// Replaces the session subtree addressed by `path` with the JSON in
    /// `value`, then saves and broadcasts the whole session.
    // Note: "value" can be any session subtree, in json format.
    pub fn set_single_value(&mut self, path: Vec<PathSegment>, value: &str) -> StrResult {
        let mut session_json = serde_json::to_value(self.session.clone()).map_err(err!())?;
        // Walk down to the addressed subtree, one segment at a time
        let mut session_ref = &mut session_json;
        for segment in path {
            session_ref = match segment {
                PathSegment::Name(name) => session_ref.get_mut(name).ok_or_else(enone!())?,
                PathSegment::Index(index) => session_ref.get_mut(index).ok_or_else(enone!())?,
            };
        }
        *session_ref = serde_json::from_str(value).map_err(err!())?;
        // session_json has been updated
        self.session = serde_json::from_value(session_json).map_err(err!())?;
        save_session(&self.session, &self.session_path).unwrap();
        alvr_events::send_event(EventType::Session(Box::new(self.session.clone())));
        Ok(())
    }

    /// Evaluates a rhai script with a read-only `session` constant in scope
    /// and returns its result (or the rhai error) as a string.
    pub fn execute_script(&self, code: &str) -> StrResult<String> {
        // Note: the scope is recreated every time to avoid cross-invocation interference
        let mut scope = rhai::Scope::new();
        scope.push_constant_dynamic(
            "session",
            rhai::serde::to_dynamic(self.session.clone()).unwrap(),
        );
        self.script_engine
            .eval_with_scope::<rhai::Dynamic>(&mut scope, code)
            .map(|d| d.to_string())
            .map_err(|e| e.to_string())
    }

    /// Classifies the first enumerated adapter by its PCI vendor ID
    /// (0x10de = NVIDIA, 0x1002 = AMD); `Other` if no adapter was found.
    pub fn get_gpu_vendor(&self) -> GpuVendor {
        if let Some(adapter) = self.gpu_adapters.get(0) {
            match adapter.get_info().vendor {
                0x10de => GpuVendor::Nvidia,
                0x1002 => GpuVendor::Amd,
                _ => GpuVendor::Other,
            }
        } else {
            GpuVendor::Other
        }
    }

    /// Name of the first enumerated adapter; empty string if none.
    pub fn get_gpu_name(&self) -> String {
        self.gpu_adapters
            .get(0)
            .map(|a| a.get_info().name)
            .unwrap_or_else(|| "".into())
    }

    /// Lists the names of all output and input audio devices.
    ///
    /// On Linux the cpal host backend (ALSA or JACK) is chosen from the
    /// session settings; elsewhere the platform default host is used.
    #[cfg_attr(not(target_os = "linux"), allow(unused_variables))]
    pub fn get_audio_devices_list(&self) -> StrResult<AudioDevicesList> {
        #[cfg(target_os = "linux")]
        let host = match self.session.to_settings().audio.linux_backend {
            LinuxAudioBackend::Alsa => cpal::host_from_id(cpal::HostId::Alsa),
            LinuxAudioBackend::Jack => cpal::host_from_id(cpal::HostId::Jack),
        }
        .map_err(err!())?;
        #[cfg(not(target_os = "linux"))]
        let host = cpal::default_host();
        let output = host
            .output_devices()
            .map_err(err!())?
            .filter_map(|d| d.name().ok())
            .collect::<Vec<_>>();
        let input = host
            .input_devices()
            .map_err(err!())?
            .filter_map(|d| d.name().ok())
            .collect::<Vec<_>>();
        Ok(AudioDevicesList { output, input })
    }

    /// Applies `action` to the client entry keyed by `hostname`.
    ///
    /// Add: inserts an untrusted entry if absent. Trust: marks the entry
    /// trusted and optionally records a manual IP. Remove: drops one manual
    /// IP, or the whole entry when no IP is given. On any actual change the
    /// session is saved, broadcast, and `update_notifier` (if any) is woken.
    pub fn update_client_list(
        &mut self,
        hostname: String,
        action: ClientListAction,
        update_notifier: Option<&Notify>,
    ) {
        let mut client_connections = self.session.client_connections.clone();
        let maybe_client_entry = client_connections.entry(hostname);
        let mut updated = false;
        match action {
            ClientListAction::AddIfMissing { display_name } => {
                if let Entry::Vacant(new_entry) = maybe_client_entry {
                    let client_connection_desc = ClientConnectionDesc {
                        trusted: false,
                        manual_ips: HashSet::new(),
                        display_name,
                    };
                    new_entry.insert(client_connection_desc);
                    updated = true;
                }
            }
            ClientListAction::TrustAndMaybeAddIp(maybe_ip) => {
                if let Entry::Occupied(mut entry) = maybe_client_entry {
                    let client_connection_ref = entry.get_mut();
                    client_connection_ref.trusted = true;
                    if let Some(ip) = maybe_ip {
                        client_connection_ref.manual_ips.insert(ip);
                    }
                    updated = true;
                }
                // else: never happens. The function must be called with AddIfMissing{} first
            }
            ClientListAction::RemoveIpOrEntry(maybe_ip) => {
                if let Entry::Occupied(mut entry) = maybe_client_entry {
                    if let Some(ip) = maybe_ip {
                        entry.get_mut().manual_ips.remove(&ip);
                    } else {
                        entry.remove_entry();
                    }
                    updated = true;
                }
            }
        }
        if updated {
            self.session.client_connections = client_connections;
            save_session(&self.session, &self.session_path).unwrap();
            alvr_events::send_event(EventType::SessionUpdated); // deprecated
            alvr_events::send_event(EventType::Session(Box::new(self.session.clone())));
            if let Some(notifier) = update_notifier {
                notifier.notify_waiters();
            }
        }
    }
}
| 35.146067 | 100 | 0.546142 |
ab2d75b96ddbc79114888b79ca33febf1963c38c | 5,589 | use crate::{AnyCb, Direction, Event, EventResult, Orientation, Printer, Selector, Vec2, View};
use cursive_core::direction::Absolute;
use std::convert::TryFrom;
/// One pane of the split layout: optionally owns a view plus the layout
/// bookkeeping needed to route events to it and draw it.
pub(crate) struct Node {
    // The contained view; `None` for an empty placeholder pane.
    pub(crate) view: Option<Box<dyn View>>,
    pub(crate) orientation: Orientation,
    // Offset (in cells) of the split divider from its centered position.
    pub(crate) split_ratio_offset: i16,
    // Absolute top-left position recorded by `set_pos` during layout.
    total_position: Option<Vec2>,
    // Size the view reported via `required_size` at the last layout.
    size: Option<Vec2>,
    // Full size allocated to this node at the last layout.
    total_size: Option<Vec2>,
}
impl Node {
    /// Creates a node holding `v`, split along `orit`.
    pub(crate) fn new<T>(v: T, orit: Orientation) -> Self
    where
        T: View,
    {
        Self {
            view: Some(Box::new(v)),
            orientation: orit,
            split_ratio_offset: 0,
            total_position: None,
            size: None,
            total_size: None,
        }
    }

    /// Returns true when the mouse position `mp` lies inside this node's
    /// laid-out rectangle; false when the node has not been laid out yet.
    pub(crate) fn click(&self, mp: Vec2) -> bool {
        // Both the position and the size must have been recorded by layout.
        if let (Some(pos), Some(total_size)) = (self.total_position, self.total_size) {
            let end_pos = pos + total_size;
            // Containment test expressed through Vec2::fits against the two
            // corners, preserved verbatim from the original logic.
            !pos.fits(mp) && end_pos.fits(mp)
        } else {
            false
        }
    }

    /// Moves the split divider one cell towards `direction`.
    ///
    /// Returns `Err(())` when the node has no recorded size, the direction
    /// is not one of the four absolutes, or the divider is already at its
    /// limit (it must stay away from the pane edge).
    pub(crate) fn move_offset(&mut self, direction: Absolute) -> Result<(), ()> {
        let total_size = self.total_size.ok_or(())?;
        // Decrementing moves towards Left/Up, incrementing towards
        // Right/Down. Moving back towards the center (the offset's sign is
        // opposite to the move) is always allowed.
        let (delta, moving_towards_center) = match direction {
            Absolute::Left | Absolute::Up => (-1, self.split_ratio_offset > 0),
            Absolute::Right | Absolute::Down => (1, self.split_ratio_offset < 0),
            _ => return Err(()),
        };
        // Length of the axis the divider moves along.
        let axis_len = match direction.into() {
            Orientation::Horizontal => i16::try_from(total_size.x).unwrap(),
            Orientation::Vertical => i16::try_from(total_size.y).unwrap(),
        };
        // Same bound as the original: at least one cell must remain between
        // the divider and the pane edge, unless we are heading back inwards.
        if axis_len / 2 - self.split_ratio_offset.abs() > 1 || moving_towards_center {
            self.split_ratio_offset += delta;
            Ok(())
        } else {
            Err(())
        }
    }

    /// Creates an empty node (no view) split along `orit`.
    pub(crate) fn new_empty(orit: Orientation) -> Self {
        Self {
            view: None,
            orientation: orit,
            split_ratio_offset: 0,
            total_position: None,
            size: None,
            total_size: None,
        }
    }

    /// Records the absolute position assigned during layout.
    /// Only nodes that actually hold a view track their position.
    pub(crate) fn set_pos(&mut self, pos: Vec2) {
        if self.view.is_some() {
            self.total_position = Some(pos);
        }
    }

    /// Returns true when a view is attached to this node.
    pub(crate) fn has_view(&self) -> bool {
        self.view.is_some()
    }

    /// Lays out the contained view (if any) inside `vec`, caching the view's
    /// required size and the node's total allocated size.
    pub(crate) fn layout_view(&mut self, vec: Vec2) {
        if let Some(view) = self.view.as_mut() {
            let size = Vec2::min(vec, view.required_size(vec));
            self.size = Some(view.required_size(vec));
            view.layout(size);
        }
        self.total_size = Some(vec);
    }

    /// Forwards `evt` to the contained view, translating coordinates so they
    /// are relative to this node — except when zoomed, where the node is
    /// drawn from the origin.
    pub(crate) fn on_event(&mut self, evt: Event, zoomed: bool) -> EventResult {
        if let Some(view) = self.view.as_mut() {
            let origin = if zoomed {
                Vec2::new(0, 0)
            } else {
                self.total_position.unwrap_or_else(|| Vec2::new(0, 0))
            };
            view.on_event(evt.relativized(origin))
        } else {
            EventResult::Ignored
        }
    }

    /// Draws the contained view, cropping the printer to the cached required
    /// size when one exists.
    pub(crate) fn draw(&self, printer: &Printer) {
        if let Some(ref view) = self.view {
            let printer_crop = match self.size {
                // cropped_centered is bugged here, panics on valid values
                Some(size) => printer.cropped(size),
                None => printer.clone(),
            };
            view.draw(&printer_crop);
        }
    }

    /// Offers focus to the contained view; empty nodes refuse focus.
    pub(crate) fn take_focus(&mut self) -> bool {
        self.view
            .as_mut()
            .map_or(false, |view| view.take_focus(Direction::none()))
    }

    /// Applies `cb` to the contained view for the given selector.
    pub(crate) fn call_on_any<'a>(&mut self, slct: &Selector, cb: AnyCb<'a>) {
        if let Some(view) = self.view.as_mut() {
            view.call_on_any(slct, cb);
        }
    }
}
| 31.937143 | 99 | 0.409733 |
1d317a9882976237e38d3f071b48d39887ca4e2a | 15,374 | use super::*;
use crate::vk_description as dsc;
use ash::vk;
/// Unique ID for a particular usage (read or write) of a specific image
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct RenderGraphImageUsageId(pub(super) usize);
/// An ID for an image used within the graph between passes
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct VirtualImageId(pub(super) usize);
/// An ID for an image allocation (possibly reused)
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct PhysicalImageId(pub(super) usize);
/// An ID for an image view allocation (possibly reused)
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct PhysicalImageViewId(pub(super) usize);
/// Unique ID provided for any image registered as an output image
#[derive(Debug, Copy, Clone)]
pub struct RenderGraphOutputImageId(pub(super) usize);
/// Unique ID for a particular version of an image. Any time an image is modified, a new version is
/// produced
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct RenderGraphImageVersionId {
    // Index of the image resource this version belongs to
    pub(super) index: usize,
    // Version number within that resource (incremented on modification)
    pub(super) version: usize,
}
/// A "virtual" image that the render graph knows about. The render graph will allocate images as
/// needed, but can reuse the same image for multiple resources if the lifetimes of those images
/// don't overlap
#[derive(Debug)]
pub struct RenderGraphImageResource {
    // Optional debug/user-facing name for the resource
    pub(super) name: Option<RenderGraphResourceName>,
    // One entry per version; a new version is appended whenever the image is modified
    pub(super) versions: Vec<RenderGraphImageResourceVersionInfo>,
}
impl RenderGraphImageResource {
    /// Creates an unnamed resource with no versions yet.
    pub(super) fn new() -> Self {
        RenderGraphImageResource {
            name: None,
            versions: Default::default(),
        }
    }
}
/// Fully-resolved description of an image view: the physical image it targets
/// plus the subresource range and view type. Hash/Eq allow deduplicating
/// identical views.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct RenderGraphImageView {
    pub(super) physical_image: PhysicalImageId,
    pub(super) subresource_range: dsc::ImageSubresourceRange,
    pub(super) view_type: dsc::ImageViewType,
}
/// Defines what created a RenderGraphImageUsage: either a graph node or an
/// output-image registration.
#[derive(Debug)]
pub enum RenderGraphImageUser {
    Node(RenderGraphNodeId),
    Output(RenderGraphOutputImageId),
}
/// Image extents: either tied to the swapchain surface size or fixed.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum RenderGraphImageExtents {
    MatchSurface,
    // (width, height, depth)
    Custom(u32, u32, u32),
}
impl RenderGraphImageExtents {
    /// Resolves to a concrete `vk::Extent3D`; `MatchSurface` takes the
    /// swapchain's width/height with depth 1.
    pub fn into_vk_extent_3d(
        self,
        swapchain_surface_info: &dsc::SwapchainSurfaceInfo,
    ) -> vk::Extent3D {
        match self {
            RenderGraphImageExtents::MatchSurface => vk::Extent3D {
                width: swapchain_surface_info.extents.width,
                height: swapchain_surface_info.extents.height,
                depth: 1,
            },
            RenderGraphImageExtents::Custom(width, height, depth) => vk::Extent3D {
                width,
                height,
                depth,
            },
        }
    }
    /// Same as `into_vk_extent_3d` but drops the depth component.
    pub fn into_vk_extent_2d(
        self,
        swapchain_surface_info: &dsc::SwapchainSurfaceInfo,
    ) -> vk::Extent2D {
        let extent_3d = self.into_vk_extent_3d(swapchain_surface_info);
        vk::Extent2D {
            width: extent_3d.width,
            height: extent_3d.height,
        }
    }
}
impl Default for RenderGraphImageExtents {
    fn default() -> Self {
        RenderGraphImageExtents::MatchSurface
    }
}
/// Shorthand subresource selections, resolved against an image's
/// specification by `into_subresource_range`.
#[derive(Clone, Debug)]
pub enum RenderGraphImageSubresourceRange {
    // Use the entire image
    AllMipsAllLayers,
    // Mip 0 with given layer
    NoMipsSingleLayer(u32),
    // Mip 0 layer 0
    NoMipsNoLayers,
    Custom(dsc::ImageSubresourceRange),
}
impl RenderGraphImageSubresourceRange {
    /// Expands this shorthand into a concrete range, pulling aspect flags
    /// (and, where needed, mip/layer counts) from `specification`.
    pub fn into_subresource_range(
        &self,
        specification: &RenderGraphImageSpecification,
    ) -> dsc::ImageSubresourceRange {
        match self {
            RenderGraphImageSubresourceRange::AllMipsAllLayers => {
                dsc::ImageSubresourceRange::default_all_mips_all_layers(
                    dsc::ImageAspectFlag::from_vk_image_aspect_flags(specification.aspect_flags),
                    specification.mip_count,
                    specification.layer_count,
                )
            }
            RenderGraphImageSubresourceRange::NoMipsSingleLayer(layer) => {
                dsc::ImageSubresourceRange::default_no_mips_single_layer(
                    dsc::ImageAspectFlag::from_vk_image_aspect_flags(specification.aspect_flags),
                    *layer,
                )
            }
            RenderGraphImageSubresourceRange::NoMipsNoLayers => {
                dsc::ImageSubresourceRange::default_no_mips_no_layers(
                    dsc::ImageAspectFlag::from_vk_image_aspect_flags(specification.aspect_flags),
                )
            }
            RenderGraphImageSubresourceRange::Custom(custom) => custom.clone(),
        }
    }
}
impl Default for RenderGraphImageSubresourceRange {
    fn default() -> Self {
        RenderGraphImageSubresourceRange::AllMipsAllLayers
    }
}
/// A usage of a particular image: who uses it, how, which version, and with
/// what layout/view parameters.
#[derive(Debug)]
pub struct RenderGraphImageUsage {
    pub(super) user: RenderGraphImageUser,
    pub(super) usage_type: RenderGraphImageUsageType,
    pub(super) version: RenderGraphImageVersionId,
    pub(super) preferred_layout: dsc::ImageLayout,
    pub(super) subresource_range: RenderGraphImageSubresourceRange,
    pub(super) view_type: dsc::ImageViewType,
    //pub(super) access_flags: vk::AccessFlags,
    //pub(super) stage_flags: vk::PipelineStageFlags,
    //pub(super) image_aspect_flags: vk::ImageAspectFlags,
}
/// Immutable, fully-specified attributes of an image. A *constraint* is partially specified and
/// the graph will use constraints to solve for the specification
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct RenderGraphImageSpecification {
    // Rename to RenderGraphImageUsageSpecification?
    pub samples: vk::SampleCountFlags,
    pub format: vk::Format,
    // Flag fields are additive: merging unions them rather than requiring equality
    pub aspect_flags: vk::ImageAspectFlags,
    pub usage_flags: vk::ImageUsageFlags,
    pub create_flags: vk::ImageCreateFlags,
    pub extents: RenderGraphImageExtents,
    pub layer_count: u32,
    pub mip_count: u32,
    // image type - always 2D
    // extents - always matches the render surface
    // tiling - always optimal
    // layout - controlled by graph
    // sharing mode - always exclusive
}
impl RenderGraphImageSpecification {
    /// Returns true if no fields in the two specifications conflict.
    ///
    /// Aspect and usage flags are not compared: they are additive and get
    /// unioned by `try_merge` instead of having to match.
    pub fn can_merge(
        &self,
        other: &RenderGraphImageSpecification,
    ) -> bool {
        self.samples == other.samples
            && self.format == other.format
            && self.mip_count == other.mip_count
            && self.layer_count == other.layer_count
            && self.extents == other.extents
    }

    /// Merges `other` into `self` only when there is no conflict; `self` is
    /// left untouched (and false returned) if any field conflicts.
    ///
    /// NOTE(review): `create_flags` is not unioned here, unlike
    /// `RenderGraphImageConstraint::try_merge` — confirm this is intentional.
    pub fn try_merge(
        &mut self,
        other: &RenderGraphImageSpecification,
    ) -> bool {
        let mergable = self.can_merge(other);
        if mergable {
            // Flag fields are unioned rather than required to match.
            self.aspect_flags |= other.aspect_flags;
            self.usage_flags |= other.usage_flags;
        }
        mergable
    }
}
/// Constraints on an image. Constraints are set per-field and start out None (i.e. unconstrained)
/// The rendergraph will derive specifications from the constraints
#[derive(Default, Clone, Debug)]
pub struct RenderGraphImageConstraint {
    // Rename to RenderGraphImageUsageConstraint?
    pub samples: Option<vk::SampleCountFlags>,
    pub format: Option<vk::Format>,
    // Flag fields are cumulative (unioned on merge), so they are not Options
    pub aspect_flags: vk::ImageAspectFlags,
    pub usage_flags: vk::ImageUsageFlags,
    pub create_flags: vk::ImageCreateFlags,
    pub extents: Option<RenderGraphImageExtents>,
    //pub dimensions: vk::ImageSubresource
    pub layer_count: Option<u32>,
    pub mip_count: Option<u32>,
}
// A fully-specified image trivially converts to a (fully-pinned) constraint.
impl From<RenderGraphImageSpecification> for RenderGraphImageConstraint {
    fn from(specification: RenderGraphImageSpecification) -> Self {
        RenderGraphImageConstraint {
            samples: Some(specification.samples),
            format: Some(specification.format),
            layer_count: Some(specification.layer_count),
            mip_count: Some(specification.mip_count),
            extents: Some(specification.extents),
            aspect_flags: specification.aspect_flags,
            usage_flags: specification.usage_flags,
            create_flags: specification.create_flags,
        }
    }
}
impl RenderGraphImageConstraint {
    /// Produces a concrete specification from this constraint, defaulting any
    /// unset field. Returns `None` when no format was constrained, since
    /// format is the only field with no sensible default.
    pub fn try_convert_to_specification(self) -> Option<RenderGraphImageSpecification> {
        // Format is the only thing we can't default sensibly
        let format = self.format?;
        Some(RenderGraphImageSpecification {
            samples: self.samples.unwrap_or(vk::SampleCountFlags::TYPE_1),
            format,
            layer_count: self.layer_count.unwrap_or(1),
            mip_count: self.mip_count.unwrap_or(1),
            extents: self.extents.unwrap_or(RenderGraphImageExtents::MatchSurface),
            aspect_flags: self.aspect_flags,
            usage_flags: self.usage_flags,
            create_flags: self.create_flags,
        })
    }
}
impl RenderGraphImageConstraint {
    /// Returns true if no fields in the two constraints are conflicting
    ///
    /// A field conflicts only when *both* sides have set it to different
    /// values; `None` is compatible with anything. Flag fields never
    /// conflict — they are unioned on merge.
    pub fn can_merge(
        &self,
        other: &RenderGraphImageConstraint,
    ) -> bool {
        if self.samples.is_some() && other.samples.is_some() && self.samples != other.samples {
            return false;
        }
        if self.format.is_some() && other.format.is_some() && self.format != other.format {
            return false;
        }
        if self.layer_count.is_some()
            && other.layer_count.is_some()
            && self.layer_count != other.layer_count
        {
            return false;
        }
        if self.mip_count.is_some()
            && other.mip_count.is_some()
            && self.mip_count != other.mip_count
        {
            return false;
        }
        if self.extents.is_some() && other.extents.is_some() && self.extents != other.extents {
            return false;
        }
        true
    }
    /// Merge other's constraints into self, but only if there are no conflicts. No modification
    /// occurs if any conflict exists
    pub fn try_merge(
        &mut self,
        other: &RenderGraphImageConstraint,
    ) -> bool {
        if !self.can_merge(other) {
            return false;
        }
        // Adopt any field the other side has pinned that we have not
        if self.samples.is_none() && other.samples.is_some() {
            self.samples = other.samples;
        }
        if self.format.is_none() && other.format.is_some() {
            self.format = other.format;
        }
        if self.layer_count.is_none() && other.layer_count.is_some() {
            self.layer_count = other.layer_count;
        }
        if self.mip_count.is_none() && other.mip_count.is_some() {
            self.mip_count = other.mip_count;
        }
        if self.extents.is_none() && other.extents.is_some() {
            self.extents = other.extents;
        }
        // Flag fields are always unioned
        self.aspect_flags |= other.aspect_flags;
        self.usage_flags |= other.usage_flags;
        self.create_flags |= other.create_flags;
        true
    }
    /// Merge other's constraints into self. We will merge fields where we can and skip fields with
    /// conflicts
    ///
    /// Returns true only when every field merged cleanly; on a conflicting
    /// field, self's value wins and false is returned.
    pub fn partial_merge(
        &mut self,
        other: &RenderGraphImageConstraint,
    ) -> bool {
        let mut complete_merge = true;
        if self.samples.is_some() && other.samples.is_some() && self.samples != other.samples {
            complete_merge = false;
        } else if other.samples.is_some() {
            self.samples = other.samples;
        }
        if self.format.is_some() && other.format.is_some() && self.format != other.format {
            complete_merge = false;
        } else if other.format.is_some() {
            self.format = other.format;
        }
        if self.layer_count.is_some()
            && other.layer_count.is_some()
            && self.layer_count != other.layer_count
        {
            complete_merge = false;
        } else if other.layer_count.is_some() {
            self.layer_count = other.layer_count;
        }
        if self.mip_count.is_some()
            && other.mip_count.is_some()
            && self.mip_count != other.mip_count
        {
            complete_merge = false;
        } else if other.mip_count.is_some() {
            self.mip_count = other.mip_count;
        }
        if self.extents.is_some() && other.extents.is_some() && self.extents != other.extents {
            complete_merge = false;
        } else if other.extents.is_some() {
            self.extents = other.extents;
        }
        // Flag fields are always unioned, even on a partial merge
        self.aspect_flags |= other.aspect_flags;
        self.usage_flags |= other.usage_flags;
        self.create_flags |= other.create_flags;
        complete_merge
    }
    /// Sets the constraints based on the given specification
    pub fn set(
        &mut self,
        other: &RenderGraphImageSpecification,
    ) {
        *self = other.clone().into();
    }
}
/// How an image is being used
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum RenderGraphImageUsageType {
    Create,
    //Input,
    Read,
    ModifyRead,
    ModifyWrite,
    Output,
}
impl RenderGraphImageUsageType {
    //TODO: Add support to see if multiple writes actually overlap
    /// True for usages that never write the image (Read, Output).
    /// The match is deliberately exhaustive so adding a variant forces a
    /// decision here.
    pub fn is_read_only(&self) -> bool {
        match self {
            RenderGraphImageUsageType::Read => true,
            RenderGraphImageUsageType::Output => true,
            RenderGraphImageUsageType::ModifyRead => false,
            RenderGraphImageUsageType::Create => false,
            //RenderGraphImageUsageType::Input => false,
            RenderGraphImageUsageType::ModifyWrite => false,
        }
    }
}
/// Information about a specific version of the image.
#[derive(Debug)]
pub struct RenderGraphImageResourceVersionInfo {
    /// What node created the image (keep in mind these are virtual images, not images provided
    /// from outside the graph. So every image will have a creator node)
    pub(super) creator_node: RenderGraphNodeId,
    pub(super) create_usage: RenderGraphImageUsageId,
    // All usages that read this version (order is not significant)
    pub(super) read_usages: Vec<RenderGraphImageUsageId>,
}
impl RenderGraphImageResourceVersionInfo {
    /// Creates a version with the given creator and no readers yet.
    pub(super) fn new(
        creator: RenderGraphNodeId,
        create_usage: RenderGraphImageUsageId,
    ) -> Self {
        RenderGraphImageResourceVersionInfo {
            creator_node: creator,
            create_usage,
            read_usages: Default::default(),
        }
    }
    // for redirect_image_usage
    /// Removes `usage` from the readers, if present. Uses swap_remove, so
    /// reader order is not preserved.
    pub(super) fn remove_read_usage(
        &mut self,
        usage: RenderGraphImageUsageId,
    ) {
        if let Some(position) = self.read_usages.iter().position(|x| *x == usage) {
            self.read_usages.swap_remove(position);
        }
    }
    /// Registers `usage` as a reader of this version.
    pub(super) fn add_read_usage(
        &mut self,
        usage: RenderGraphImageUsageId,
    ) {
        self.read_usages.push(usage);
    }
}
| 32.366316 | 99 | 0.633147 |
fb68efe8929d1061ff23b97084c046c4fcf73f0f | 175 | fn shortest_distance(a: f64, b: f64, c: f64) -> f64 {
let mut sides = [a,b,c];
sides.sort_by(|a,b| a.partial_cmp(b).unwrap() );
sides[2].hypot(sides[0]+sides[1])
} | 35 | 53 | 0.594286 |
ffbb970510414bbdf8b610967745a3d8404a2bb5 | 497 | #![cfg(feature = "importexport")]
extern crate env_logger;
extern crate rusoto_core;
extern crate rusoto_importexport;
use rusoto_core::Region;
use rusoto_importexport::{ImportExport, ImportExportClient, ListJobsInput};
// Live integration test against AWS ImportExport in us-east-1; marked
// #[ignore] because it requires valid AWS credentials and network access.
#[test]
#[ignore]
fn should_list_jobs() {
    let _ = env_logger::try_init();
    let client = ImportExportClient::new(Region::UsEast1);
    let request = ListJobsInput::default();
    let result = client.list_jobs(request).sync().unwrap();
    println!("{:#?}", result);
}
| 26.157895 | 75 | 0.71831 |
21b81fdae869a76d7d9231c783d5f29a6b0bb07a | 19,977 | use super::event_file::EventCreationFlags;
use super::file_ops;
use super::file_ops::{
get_abs_path_by_fd, AccessibilityCheckFlags, AccessibilityCheckMode, ChownFlags, FcntlCmd,
FsPath, LinkFlags, StatFlags, UnlinkFlags, AT_FDCWD,
};
use super::fs_ops;
use super::time::{clockid_t, itimerspec_t, ClockID};
use super::timer_file::{TimerCreationFlags, TimerSetFlags};
use super::*;
use util::mem_util::from_user;
/// Scatter/gather element matching the libc `iovec` layout, read from
/// untrusted user memory by do_readv/do_writev.
#[allow(non_camel_case_types)]
pub struct iovec_t {
    // Start of the user buffer
    base: *const c_void,
    // Buffer length in bytes
    len: size_t,
}
/// eventfd(2): equivalent to eventfd2 with no flags.
pub fn do_eventfd(init_val: u32) -> Result<isize> {
    do_eventfd2(init_val, 0)
}
/// eventfd2(2): creates an event file with the given initial counter value
/// and flags, installs it in the current process's file table, and returns
/// the new fd. Unknown flag bits yield EINVAL.
pub fn do_eventfd2(init_val: u32, flags: i32) -> Result<isize> {
    info!("eventfd: initval {}, flags {} ", init_val, flags);
    let inner_flags =
        EventCreationFlags::from_bits(flags).ok_or_else(|| errno!(EINVAL, "invalid flags"))?;
    let file_ref: Arc<dyn File> = {
        let event = EventFile::new(init_val, inner_flags)?;
        Arc::new(event)
    };
    // The close-on-exec bit is tracked by the file table, not the file itself
    let fd = current!().add_file(
        file_ref,
        inner_flags.contains(EventCreationFlags::EFD_CLOEXEC),
    );
    Ok(fd as isize)
}
/// timerfd_create(2): creates a timer file for the given clock and returns
/// its fd. Only CLOCK_REALTIME and CLOCK_MONOTONIC are accepted; any other
/// clock or unknown flag bits yield EINVAL.
pub fn do_timerfd_create(clockid: clockid_t, flags: i32) -> Result<isize> {
    debug!("timerfd: clockid {}, flags {} ", clockid, flags);
    let clockid = ClockID::from_raw(clockid)?;
    match clockid {
        ClockID::CLOCK_REALTIME | ClockID::CLOCK_MONOTONIC => {}
        _ => {
            return_errno!(EINVAL, "invalid clockid");
        }
    }
    let timer_create_flags =
        TimerCreationFlags::from_bits(flags).ok_or_else(|| errno!(EINVAL, "invalid flags"))?;
    let file_ref: Arc<dyn File> = {
        let timer = TimerFile::new(clockid, timer_create_flags)?;
        Arc::new(timer)
    };
    let fd = current!().add_file(
        file_ref,
        timer_create_flags.contains(TimerCreationFlags::TFD_CLOEXEC),
    );
    Ok(fd as isize)
}
/// timerfd_settime(2): arms (or disarms) the timer behind `fd` with the new
/// value read from user memory; the previous setting is written back to
/// `old_value_ptr` when that pointer is non-null.
pub fn do_timerfd_settime(
    fd: FileDesc,
    flags: i32,
    new_value_ptr: *const itimerspec_t,
    old_value_ptr: *mut itimerspec_t,
) -> Result<isize> {
    // Validate the untrusted user pointer before dereferencing
    from_user::check_ptr(new_value_ptr)?;
    let new_value = itimerspec_t::from_raw_ptr(new_value_ptr)?;
    let timer_set_flags =
        TimerSetFlags::from_bits(flags).ok_or_else(|| errno!(EINVAL, "invalid flags"))?;
    let current = current!();
    let file = current.file(fd)?;
    // Fails with the appropriate errno if fd is not a timerfd
    let timerfile = file.as_timer()?;
    let old_value = timerfile.set_time(timer_set_flags, &new_value)?;
    if !old_value_ptr.is_null() {
        from_user::check_mut_ptr(old_value_ptr)?;
        // SAFETY: pointer was range-checked by check_mut_ptr above
        unsafe {
            old_value_ptr.write(old_value);
        }
    }
    Ok(0)
}
/// timerfd_gettime(2): writes the timer's current setting to user memory.
pub fn do_timerfd_gettime(fd: FileDesc, curr_value_ptr: *mut itimerspec_t) -> Result<isize> {
    from_user::check_mut_ptr(curr_value_ptr)?;
    let current = current!();
    let file = current.file(fd)?;
    let timerfile = file.as_timer()?;
    let curr_value = timerfile.time()?;
    // SAFETY: pointer was range-checked by check_mut_ptr above
    unsafe {
        curr_value_ptr.write(curr_value);
    }
    Ok(0)
}
/// creat(2): equivalent to open with O_WRONLY | O_CREAT | O_TRUNC.
pub fn do_creat(path: *const i8, mode: u16) -> Result<isize> {
    let flags =
        AccessMode::O_WRONLY as u32 | (CreationFlags::O_CREAT | CreationFlags::O_TRUNC).bits();
    self::do_open(path, flags, mode)
}
/// open(2): equivalent to openat relative to the current working directory.
pub fn do_open(path: *const i8, flags: u32, mode: u16) -> Result<isize> {
    self::do_openat(AT_FDCWD, path, flags, mode)
}
/// openat(2): copies the path string out of user memory, resolves it
/// relative to `dirfd`, and opens it, returning the new fd.
pub fn do_openat(dirfd: i32, path: *const i8, flags: u32, mode: u16) -> Result<isize> {
    let path = from_user::clone_cstring_safely(path)?
        .to_string_lossy()
        .into_owned();
    let fs_path = FsPath::new(&path, dirfd, false)?;
    let mode = FileMode::from_bits_truncate(mode);
    let fd = file_ops::do_openat(&fs_path, flags, mode)?;
    Ok(fd as isize)
}
/// umask(2): installs a new file-mode creation mask on the current process
/// and returns the previous mask.
pub fn do_umask(mask: u16) -> Result<isize> {
    let new_mask = FileMode::from_bits_truncate(mask).to_umask();
    let old_mask = current!().process().set_umask(new_mask);
    Ok(old_mask.bits() as isize)
}
/// close(2): removes `fd` from the current process's file table.
pub fn do_close(fd: FileDesc) -> Result<isize> {
    file_ops::do_close(fd)?;
    Ok(0)
}
/// read(2): reads up to `size` bytes from `fd` into the user buffer and
/// returns the number of bytes read.
pub fn do_read(fd: FileDesc, buf: *mut u8, size: usize) -> Result<isize> {
    let safe_buf = {
        // Validate the untrusted user range before building a slice over it
        from_user::check_mut_array(buf, size)?;
        // SAFETY: range was checked by check_mut_array above
        unsafe { std::slice::from_raw_parts_mut(buf, size) }
    };
    let len = file_ops::do_read(fd, safe_buf)?;
    Ok(len as isize)
}
/// write(2): writes up to `size` bytes from the user buffer to `fd` and
/// returns the number of bytes written.
pub fn do_write(fd: FileDesc, buf: *const u8, size: usize) -> Result<isize> {
    let safe_buf = {
        from_user::check_array(buf, size)?;
        // SAFETY: range was checked by check_array above
        unsafe { std::slice::from_raw_parts(buf, size) }
    };
    let len = file_ops::do_write(fd, safe_buf)?;
    Ok(len as isize)
}
/// writev(2): gathers `count` user iovecs into slices and writes them to
/// `fd`, returning the total number of bytes written. Negative counts
/// yield EINVAL.
pub fn do_writev(fd: FileDesc, iov: *const iovec_t, count: i32) -> Result<isize> {
    let count = {
        if count < 0 {
            return_errno!(EINVAL, "Invalid count of iovec");
        }
        count as usize
    };
    // Validates that the iovec array itself is in user memory. NOTE(review):
    // the base/len of each element are used without a separate range check
    // here — presumably validated downstream; confirm.
    from_user::check_array(iov, count)?;
    let bufs_vec = {
        let mut bufs_vec = Vec::with_capacity(count);
        for iov_i in 0..count {
            let iov_ptr = unsafe { iov.offset(iov_i as isize) };
            let iov = unsafe { &*iov_ptr };
            let buf = unsafe { std::slice::from_raw_parts(iov.base as *const u8, iov.len) };
            bufs_vec.push(buf);
        }
        bufs_vec
    };
    let bufs = &bufs_vec[..];
    let len = file_ops::do_writev(fd, bufs)?;
    Ok(len as isize)
}
/// readv(2): scatters data read from `fd` into `count` user iovecs and
/// returns the total number of bytes read. Negative counts yield EINVAL.
pub fn do_readv(fd: FileDesc, iov: *mut iovec_t, count: i32) -> Result<isize> {
    let count = {
        if count < 0 {
            return_errno!(EINVAL, "Invalid count of iovec");
        }
        count as usize
    };
    // Validates the iovec array itself; see the matching note in do_writev
    // about per-element buffer validation.
    from_user::check_array(iov, count)?;
    let mut bufs_vec = {
        let mut bufs_vec = Vec::with_capacity(count);
        for iov_i in 0..count {
            let iov_ptr = unsafe { iov.offset(iov_i as isize) };
            let iov = unsafe { &*iov_ptr };
            let buf = unsafe { std::slice::from_raw_parts_mut(iov.base as *mut u8, iov.len) };
            bufs_vec.push(buf);
        }
        bufs_vec
    };
    let bufs = &mut bufs_vec[..];
    let len = file_ops::do_readv(fd, bufs)?;
    Ok(len as isize)
}
/// pread64(2): like read, but at the given file offset without moving the
/// file position.
pub fn do_pread(fd: FileDesc, buf: *mut u8, size: usize, offset: off_t) -> Result<isize> {
    let safe_buf = {
        from_user::check_mut_array(buf, size)?;
        // SAFETY: range was checked by check_mut_array above
        unsafe { std::slice::from_raw_parts_mut(buf, size) }
    };
    let len = file_ops::do_pread(fd, safe_buf, offset)?;
    Ok(len as isize)
}
/// pwrite64(2): like write, but at the given file offset without moving the
/// file position.
pub fn do_pwrite(fd: FileDesc, buf: *const u8, size: usize, offset: off_t) -> Result<isize> {
    let safe_buf = {
        from_user::check_array(buf, size)?;
        // SAFETY: range was checked by check_array above
        unsafe { std::slice::from_raw_parts(buf, size) }
    };
    let len = file_ops::do_pwrite(fd, safe_buf, offset)?;
    Ok(len as isize)
}
/// Syscall handler for `fstat(2)`: fill `stat_buf` with metadata for `fd`.
pub fn do_fstat(fd: FileDesc, stat_buf: *mut Stat) -> Result<isize> {
    // Validate the user output pointer before the unsafe write below.
    from_user::check_mut_ptr(stat_buf)?;
    let stat = file_ops::do_fstat(fd)?;
    unsafe {
        stat_buf.write(stat);
    }
    Ok(0)
}
/// Syscall handler for `stat(2)`: `fstatat` relative to CWD, following symlinks.
pub fn do_stat(path: *const i8, stat_buf: *mut Stat) -> Result<isize> {
    self::do_fstatat(AT_FDCWD, path, stat_buf, 0)
}
/// Syscall handler for `lstat(2)`: like `stat` but does not follow a final symlink.
pub fn do_lstat(path: *const i8, stat_buf: *mut Stat) -> Result<isize> {
    self::do_fstatat(
        AT_FDCWD,
        path,
        stat_buf,
        StatFlags::AT_SYMLINK_NOFOLLOW.bits(),
    )
}
/// Syscall handler for `fstatat(2)`: stat a path relative to `dirfd` with `flags`.
pub fn do_fstatat(dirfd: i32, path: *const i8, stat_buf: *mut Stat, flags: u32) -> Result<isize> {
    // Copy the path out of user memory before touching it.
    let path = from_user::clone_cstring_safely(path)?
        .to_string_lossy()
        .into_owned();
    let flags = StatFlags::from_bits(flags).ok_or_else(|| errno!(EINVAL, "invalid flags"))?;
    let fs_path = FsPath::new(&path, dirfd, flags.contains(StatFlags::AT_EMPTY_PATH))?;
    // Validate the user output pointer before the unsafe write below.
    from_user::check_mut_ptr(stat_buf)?;
    let stat = file_ops::do_fstatat(&fs_path, flags)?;
    unsafe {
        stat_buf.write(stat);
    }
    Ok(0)
}
/// Syscall handler for `access(2)`: `faccessat` relative to CWD with no flags.
pub fn do_access(path: *const i8, mode: u32) -> Result<isize> {
    self::do_faccessat(AT_FDCWD, path, mode, 0)
}
/// Syscall handler for `faccessat(2)`: check accessibility of a path.
pub fn do_faccessat(dirfd: i32, path: *const i8, mode: u32, flags: u32) -> Result<isize> {
    let path = from_user::clone_cstring_safely(path)?
        .to_string_lossy()
        .into_owned();
    let fs_path = FsPath::new(&path, dirfd, false)?;
    let mode = AccessibilityCheckMode::from_u32(mode)?;
    let flags = AccessibilityCheckFlags::from_u32(flags)?;
    file_ops::do_faccessat(&fs_path, mode, flags).map(|_| 0)
}
/// Syscall handler for `lseek(2)`: reposition the cursor of `fd`.
///
/// `whence` selects the origin: 0 = SEEK_SET (absolute, must be >= 0),
/// 1 = SEEK_CUR (relative to current), 2 = SEEK_END (relative to EOF).
/// Returns the resulting absolute file offset.
pub fn do_lseek(fd: FileDesc, offset: off_t, whence: i32) -> Result<isize> {
    const SEEK_SET: i32 = 0;
    const SEEK_CUR: i32 = 1;
    const SEEK_END: i32 = 2;
    let seek_from = match whence {
        SEEK_SET => {
            // An absolute file position can never be negative.
            if offset < 0 {
                return_errno!(EINVAL, "Invalid offset");
            }
            SeekFrom::Start(offset as u64)
        }
        SEEK_CUR => SeekFrom::Current(offset),
        SEEK_END => SeekFrom::End(offset),
        _ => {
            return_errno!(EINVAL, "Invalid whence");
        }
    };
    let new_offset = file_ops::do_lseek(fd, seek_from)?;
    Ok(new_offset as isize)
}
/// Syscall handler for `fsync(2)`: flush data and metadata of `fd` to storage.
pub fn do_fsync(fd: FileDesc) -> Result<isize> {
    file_ops::do_fsync(fd)?;
    Ok(0)
}
/// Syscall handler for `fdatasync(2)`: flush file data (not all metadata).
pub fn do_fdatasync(fd: FileDesc) -> Result<isize> {
    file_ops::do_fdatasync(fd)?;
    Ok(0)
}
/// Syscall handler for `truncate(2)`: set the size of the file at `path` to `len`.
pub fn do_truncate(path: *const i8, len: usize) -> Result<isize> {
    let path = from_user::clone_cstring_safely(path)?
        .to_string_lossy()
        .into_owned();
    file_ops::do_truncate(&path, len)?;
    Ok(0)
}
/// Syscall handler for `ftruncate(2)`: set the size of the open file `fd` to `len`.
pub fn do_ftruncate(fd: FileDesc, len: usize) -> Result<isize> {
    file_ops::do_ftruncate(fd, len)?;
    Ok(0)
}
/// Syscall handler for `getdents64(2)`: read directory entries into `buf`.
pub fn do_getdents64(fd: FileDesc, buf: *mut u8, buf_size: usize) -> Result<isize> {
    // The buffer is written to, so it must be checked for write access.
    let safe_buf = {
        from_user::check_mut_array(buf, buf_size)?;
        unsafe { std::slice::from_raw_parts_mut(buf, buf_size) }
    };
    let len = file_ops::do_getdents64(fd, safe_buf)?;
    Ok(len as isize)
}
/// Syscall handler for the legacy `getdents(2)` entry format.
pub fn do_getdents(fd: FileDesc, buf: *mut u8, buf_size: usize) -> Result<isize> {
    let safe_buf = {
        from_user::check_mut_array(buf, buf_size)?;
        unsafe { std::slice::from_raw_parts_mut(buf, buf_size) }
    };
    let len = file_ops::do_getdents(fd, safe_buf)?;
    Ok(len as isize)
}
/// Syscall handler for `sync(2)`: flush all filesystem buffers.
pub fn do_sync() -> Result<isize> {
    fs_ops::do_sync()?;
    Ok(0)
}
/// Syscall handler for `pipe(2)`: equivalent to `pipe2` with no flags.
pub fn do_pipe(fds_u: *mut i32) -> Result<isize> {
    do_pipe2(fds_u, 0)
}
/// Syscall handler for `pipe2(2)`.
///
/// On success writes the read end into `fds_u[0]` and the write end into
/// `fds_u[1]`.
pub fn do_pipe2(fds_u: *mut i32, flags: u32) -> Result<isize> {
    // Validate the two-element user array before the unsafe writes below.
    from_user::check_mut_array(fds_u, 2)?;
    // TODO: how to deal with open flags???
    // (`flags` is already `u32`; the previous `flags as u32` cast was a no-op.)
    let fds = pipe::do_pipe2(flags)?;
    unsafe {
        *fds_u.offset(0) = fds[0] as c_int;
        *fds_u.offset(1) = fds[1] as c_int;
    }
    Ok(0)
}
/// Syscall handler for `dup(2)`: duplicate `old_fd` onto the lowest free fd.
pub fn do_dup(old_fd: FileDesc) -> Result<isize> {
    let new_fd = file_ops::do_dup(old_fd)?;
    Ok(new_fd as isize)
}
/// Syscall handler for `dup2(2)`: duplicate `old_fd` onto `new_fd`.
pub fn do_dup2(old_fd: FileDesc, new_fd: FileDesc) -> Result<isize> {
    let new_fd = file_ops::do_dup2(old_fd, new_fd)?;
    Ok(new_fd as isize)
}
/// Syscall handler for `dup3(2)`: like `dup2` but with `O_CLOEXEC`-style flags.
pub fn do_dup3(old_fd: FileDesc, new_fd: FileDesc, flags: u32) -> Result<isize> {
    let new_fd = file_ops::do_dup3(old_fd, new_fd, flags)?;
    Ok(new_fd as isize)
}
/// Syscall handler for `chdir(2)`: change the current working directory.
pub fn do_chdir(path: *const i8) -> Result<isize> {
    let path = from_user::clone_cstring_safely(path)?
        .to_string_lossy()
        .into_owned();
    fs_ops::do_chdir(&path)?;
    Ok(0)
}
/// Syscall handler for `fchdir(2)`: chdir to the directory open at `fd`.
pub fn do_fchdir(fd: FileDesc) -> Result<isize> {
    // Resolve the fd back to an absolute path, then reuse the chdir path.
    let path = get_abs_path_by_fd(fd)?;
    fs_ops::do_chdir(&path)?;
    Ok(0)
}
/// Syscall handler for `getcwd(2)`: copy the CWD (NUL-terminated) into `buf_ptr`.
pub fn do_getcwd(buf_ptr: *mut u8, size: usize) -> Result<isize> {
    let buf = {
        from_user::check_mut_array(buf_ptr, size)?;
        unsafe { std::slice::from_raw_parts_mut(buf_ptr, size) }
    };
    let cwd = fs_ops::do_getcwd()?;
    // Need room for the path plus the trailing NUL byte.
    if cwd.len() + 1 > buf.len() {
        return_errno!(ERANGE, "buf is not long enough");
    }
    buf[..cwd.len()].copy_from_slice(cwd.as_bytes());
    buf[cwd.len()] = b'\0';
    // The user-level library returns the pointer of buffer, the kernel just returns
    // the length of the buffer filled (which includes the ending '\0' character).
    Ok((cwd.len() + 1) as isize)
}
/// Syscall handler for `rename(2)`: `renameat` with both sides relative to CWD.
pub fn do_rename(oldpath: *const i8, newpath: *const i8) -> Result<isize> {
    self::do_renameat(AT_FDCWD, oldpath, AT_FDCWD, newpath)
}
/// Syscall handler for `renameat(2)`: move `oldpath` (relative to `olddirfd`)
/// to `newpath` (relative to `newdirfd`).
pub fn do_renameat(
    olddirfd: i32,
    oldpath: *const i8,
    newdirfd: i32,
    newpath: *const i8,
) -> Result<isize> {
    // Copy both paths out of user memory before using them.
    let oldpath = from_user::clone_cstring_safely(oldpath)?
        .to_string_lossy()
        .into_owned();
    let newpath = from_user::clone_cstring_safely(newpath)?
        .to_string_lossy()
        .into_owned();
    let old_fs_path = FsPath::new(&oldpath, olddirfd, false)?;
    let new_fs_path = FsPath::new(&newpath, newdirfd, false)?;
    file_ops::do_renameat(&old_fs_path, &new_fs_path)?;
    Ok(0)
}
/// Syscall handler for `mkdir(2)`: `mkdirat` relative to CWD.
pub fn do_mkdir(path: *const i8, mode: u16) -> Result<isize> {
    self::do_mkdirat(AT_FDCWD, path, mode)
}
/// Syscall handler for `mkdirat(2)`: create a directory with the given mode bits.
pub fn do_mkdirat(dirfd: i32, path: *const i8, mode: u16) -> Result<isize> {
    let path = from_user::clone_cstring_safely(path)?
        .to_string_lossy()
        .into_owned();
    let fs_path = FsPath::new(&path, dirfd, false)?;
    // Unknown mode bits are silently dropped (truncated) rather than rejected.
    let mode = FileMode::from_bits_truncate(mode);
    file_ops::do_mkdirat(&fs_path, mode)?;
    Ok(0)
}
/// Syscall handler for `rmdir(2)`: remove an empty directory.
pub fn do_rmdir(path: *const i8) -> Result<isize> {
    let path = from_user::clone_cstring_safely(path)?
        .to_string_lossy()
        .into_owned();
    file_ops::do_rmdir(&path)?;
    Ok(0)
}
/// Syscall handler for `link(2)`: `linkat` with both sides relative to CWD.
pub fn do_link(oldpath: *const i8, newpath: *const i8) -> Result<isize> {
    self::do_linkat(AT_FDCWD, oldpath, AT_FDCWD, newpath, 0)
}
/// Syscall handler for `linkat(2)`: create a hard link to `oldpath` at `newpath`.
pub fn do_linkat(
    olddirfd: i32,
    oldpath: *const i8,
    newdirfd: i32,
    newpath: *const i8,
    flags: i32,
) -> Result<isize> {
    let oldpath = from_user::clone_cstring_safely(oldpath)?
        .to_string_lossy()
        .into_owned();
    let newpath = from_user::clone_cstring_safely(newpath)?
        .to_string_lossy()
        .into_owned();
    // Unknown flag bits are rejected with EINVAL.
    let flags = LinkFlags::from_bits(flags).ok_or_else(|| errno!(EINVAL, "invalid flags"))?;
    let old_fs_path = FsPath::new(&oldpath, olddirfd, flags.contains(LinkFlags::AT_EMPTY_PATH))?;
    let new_fs_path = FsPath::new(&newpath, newdirfd, false)?;
    file_ops::do_linkat(&old_fs_path, &new_fs_path, flags)?;
    Ok(0)
}
/// Syscall handler for `unlink(2)`: `unlinkat` relative to CWD with no flags.
pub fn do_unlink(path: *const i8) -> Result<isize> {
    self::do_unlinkat(AT_FDCWD, path, 0)
}
/// Syscall handler for `unlinkat(2)`: remove a directory entry.
pub fn do_unlinkat(dirfd: i32, path: *const i8, flags: i32) -> Result<isize> {
    let path = from_user::clone_cstring_safely(path)?
        .to_string_lossy()
        .into_owned();
    let fs_path = FsPath::new(&path, dirfd, false)?;
    let flags =
        UnlinkFlags::from_bits(flags).ok_or_else(|| errno!(EINVAL, "invalid flag value"))?;
    file_ops::do_unlinkat(&fs_path, flags)?;
    Ok(0)
}
/// Syscall handler for `readlink(2)`: `readlinkat` relative to CWD.
pub fn do_readlink(path: *const i8, buf: *mut u8, size: usize) -> Result<isize> {
    self::do_readlinkat(AT_FDCWD, path, buf, size)
}
/// Syscall handler for `readlinkat(2)`: copy the target of the symlink at
/// `path` (relative to `dirfd`) into the user buffer `buf`.
/// Returns the number of bytes written (no NUL terminator is appended).
pub fn do_readlinkat(dirfd: i32, path: *const i8, buf: *mut u8, size: usize) -> Result<isize> {
    let path = from_user::clone_cstring_safely(path)?
        .to_string_lossy()
        .into_owned();
    let buf = {
        // The link target is *written* into `buf`, so the range must be
        // checked for write access (previously only `check_array`, i.e.
        // read access, was checked before `from_raw_parts_mut`).
        from_user::check_mut_array(buf, size)?;
        unsafe { std::slice::from_raw_parts_mut(buf, size) }
    };
    let fs_path = FsPath::new(&path, dirfd, false)?;
    let len = file_ops::do_readlinkat(&fs_path, buf)?;
    Ok(len as isize)
}
/// Syscall handler for `symlink(2)`: `symlinkat` relative to CWD.
pub fn do_symlink(target: *const i8, link_path: *const i8) -> Result<isize> {
    self::do_symlinkat(target, AT_FDCWD, link_path)
}
/// Syscall handler for `symlinkat(2)`: create a symlink at `link_path`
/// (relative to `new_dirfd`) pointing at `target`.
pub fn do_symlinkat(target: *const i8, new_dirfd: i32, link_path: *const i8) -> Result<isize> {
    let target = from_user::clone_cstring_safely(target)?
        .to_string_lossy()
        .into_owned();
    let link_path = from_user::clone_cstring_safely(link_path)?
        .to_string_lossy()
        .into_owned();
    let fs_path = FsPath::new(&link_path, new_dirfd, false)?;
    file_ops::do_symlinkat(&target, &fs_path)?;
    Ok(0)
}
/// Syscall handler for `chmod(2)`: `fchmodat` relative to CWD.
pub fn do_chmod(path: *const i8, mode: u16) -> Result<isize> {
    self::do_fchmodat(AT_FDCWD, path, mode)
}
/// Syscall handler for `fchmod(2)`: change mode bits of the open file `fd`.
pub fn do_fchmod(fd: FileDesc, mode: u16) -> Result<isize> {
    // Unknown mode bits are silently dropped (truncated) rather than rejected.
    let mode = FileMode::from_bits_truncate(mode);
    file_ops::do_fchmod(fd, mode)?;
    Ok(0)
}
/// Syscall handler for `fchmodat(2)`: change mode bits of a path.
pub fn do_fchmodat(dirfd: i32, path: *const i8, mode: u16) -> Result<isize> {
    let path = from_user::clone_cstring_safely(path)?
        .to_string_lossy()
        .into_owned();
    let mode = FileMode::from_bits_truncate(mode);
    let fs_path = FsPath::new(&path, dirfd, false)?;
    file_ops::do_fchmodat(&fs_path, mode)?;
    Ok(0)
}
/// Syscall handler for `chown(2)`: `fchownat` relative to CWD with no flags.
pub fn do_chown(path: *const i8, uid: u32, gid: u32) -> Result<isize> {
    self::do_fchownat(AT_FDCWD, path, uid, gid, 0)
}
/// Syscall handler for `fchown(2)`: change ownership of the open file `fd`.
pub fn do_fchown(fd: FileDesc, uid: u32, gid: u32) -> Result<isize> {
    file_ops::do_fchown(fd, uid, gid)?;
    Ok(0)
}
/// Syscall handler for `fchownat(2)`: change ownership of a path.
pub fn do_fchownat(dirfd: i32, path: *const i8, uid: u32, gid: u32, flags: i32) -> Result<isize> {
    let path = from_user::clone_cstring_safely(path)?
        .to_string_lossy()
        .into_owned();
    // Unknown flag bits are rejected with EINVAL.
    let flags = ChownFlags::from_bits(flags).ok_or_else(|| errno!(EINVAL, "invalid flags"))?;
    let fs_path = FsPath::new(&path, dirfd, flags.contains(ChownFlags::AT_EMPTY_PATH))?;
    file_ops::do_fchownat(&fs_path, uid, gid, flags)?;
    Ok(0)
}
/// Syscall handler for `lchown(2)`: chown without following a final symlink.
pub fn do_lchown(path: *const i8, uid: u32, gid: u32) -> Result<isize> {
    self::do_fchownat(
        AT_FDCWD,
        path,
        uid,
        gid,
        ChownFlags::AT_SYMLINK_NOFOLLOW.bits(),
    )
}
/// Syscall handler for `sendfile(2)`: copy up to `count` bytes from `in_fd`
/// to `out_fd`.
///
/// If `offset_ptr` is non-null, reading starts at `*offset_ptr` and the
/// updated offset is written back; if null, the file cursor of `in_fd` is used.
pub fn do_sendfile(
    out_fd: FileDesc,
    in_fd: FileDesc,
    offset_ptr: *mut off_t,
    count: usize,
) -> Result<isize> {
    let offset = if offset_ptr.is_null() {
        None
    } else {
        // Validate the user pointer before reading and (later) writing it.
        from_user::check_mut_ptr(offset_ptr)?;
        Some(unsafe { offset_ptr.read() })
    };
    let (len, offset) = file_ops::do_sendfile(out_fd, in_fd, offset, count)?;
    // Write the advanced offset back only when the caller supplied a pointer.
    if !offset_ptr.is_null() {
        unsafe {
            offset_ptr.write(offset as off_t);
        }
    }
    Ok(len as isize)
}
/// Syscall handler for `fcntl(2)`: decode the raw cmd/arg pair and dispatch.
pub fn do_fcntl(fd: FileDesc, cmd: u32, arg: u64) -> Result<isize> {
    let mut cmd = FcntlCmd::from_raw(cmd, arg)?;
    file_ops::do_fcntl(fd, &mut cmd)
}
/// Syscall handler for `ioctl(2)`: decode `cmd`/`argp` and dispatch to the file.
///
/// `argp` may be null for commands that carry no argument; when non-null it
/// is validated as a writable user pointer before being wrapped.
pub fn do_ioctl(fd: FileDesc, cmd: u32, argp: *mut u8) -> Result<isize> {
    let mut ioctl_cmd = unsafe {
        if !argp.is_null() {
            from_user::check_mut_ptr(argp)?;
        }
        IoctlCmd::new(cmd, argp)?
    };
    file_ops::do_ioctl(fd, &mut ioctl_cmd)?;
    Ok(0)
}
/// Occlum-specific call: verify the protected Occlum.json against the expected
/// MAC and mount the encrypted root filesystem, optionally with a user key.
pub fn do_mount_rootfs(
    key_ptr: *const sgx_key_128bit_t,
    occlum_json_mac_ptr: *const sgx_aes_gcm_128bit_tag_t,
) -> Result<isize> {
    // A null key pointer means "derive the key automatically".
    let key = if key_ptr.is_null() {
        None
    } else {
        Some(unsafe { key_ptr.read() })
    };
    // The MAC is mandatory: it authenticates the protected config file.
    if occlum_json_mac_ptr.is_null() {
        return_errno!(EINVAL, "occlum_json_mac_ptr cannot be null");
    }
    let expected_occlum_json_mac = unsafe { occlum_json_mac_ptr.read() };
    // NOTE(review): `unsafe` here is presumably required to read the
    // INSTANCE_DIR static — confirm; `format!` itself is safe.
    let user_config_path = unsafe { format!("{}{}", INSTANCE_DIR, "/build/Occlum.json.protected") };
    let user_config = config::load_config(&user_config_path, &expected_occlum_json_mac)?;
    fs_ops::do_mount_rootfs(&user_config, &key)?;
    Ok(0)
}
/// Syscall handler for `fallocate(2)`: preallocate space for `fd`.
///
/// Only mode 0 (the `posix_fallocate` behavior) is supported.
pub fn do_fallocate(fd: FileDesc, mode: u32, offset: off_t, len: off_t) -> Result<isize> {
    // Per fallocate(2): offset must be >= 0 and len must be > 0.
    if offset < 0 || len <= 0 {
        return_errno!(
            EINVAL,
            "offset was less than 0, or len was less than or equal to 0"
        );
    }
    // Current implementation is just the posix_fallocate
    // TODO: Support more modes in fallocate
    if mode != 0 {
        return_errno!(ENOSYS, "unsupported mode");
    }
    file_ops::do_fallocate(fd, mode, offset as u64, len as u64)?;
    Ok(0)
}
/// Syscall handler for `fstatfs(2)`: fill `statfs_buf` with filesystem
/// statistics for the filesystem containing `fd`.
pub fn do_fstatfs(fd: FileDesc, statfs_buf: *mut Statfs) -> Result<isize> {
    // Validate the user output pointer before the unsafe write below.
    from_user::check_mut_ptr(statfs_buf)?;
    let statfs = fs_ops::do_fstatfs(fd)?;
    unsafe {
        statfs_buf.write(statfs);
    }
    Ok(0)
}
/// Syscall handler for `statfs(2)`: fill `statfs_buf` with filesystem
/// statistics for the filesystem containing `path`.
pub fn do_statfs(path: *const i8, statfs_buf: *mut Statfs) -> Result<isize> {
    let path = from_user::clone_cstring_safely(path)?
        .to_string_lossy()
        .into_owned();
    // Validate the user output pointer before the unsafe write below.
    // (Mirrors do_fstatfs; this check was previously missing, allowing an
    // unchecked write through a user-supplied pointer.)
    from_user::check_mut_ptr(statfs_buf)?;
    let statfs = fs_ops::do_statfs(&path)?;
    unsafe {
        statfs_buf.write(statfs);
    }
    Ok(0)
}
| 30.268182 | 100 | 0.624268 |
5dfe954ff9a86d20ebd1fa63b4b67613c1138a65 | 802 | use language::operations::{make_param_doc, Operation, ParamInfo};
/// Marker type implementing the `face_keys_get_age` script operation.
pub struct FaceKeysGetAgeOp;
// User-facing operation documentation string; keep wording stable.
const DOC : &str = "Version 1.161+. Unpacks age slider value from face keys string. Values are in the range of 0..63.";
// Numeric opcode assigned to this operation in the scripting engine.
pub const OP_CODE: u32 = 2762;
// Script-source identifier under which this operation is invoked.
pub const IDENT: &str = "face_keys_get_age";
impl Operation for FaceKeysGetAgeOp {
    // Numeric opcode of this operation.
    fn op_code(&self) -> u32 {
        OP_CODE
    }
    // Human-readable documentation string shown to script authors.
    fn documentation(&self) -> &'static str {
        DOC
    }
    // Textual identifier of this operation.
    fn identifier(&self) -> &'static str {
        IDENT
    }
    // Two required parameters, no optional ones.
    fn param_info(&self) -> ParamInfo {
        ParamInfo {
            num_required: 2,
            num_optional: 0,
            param_docs: vec![
                make_param_doc("<destination>", ""),
                make_param_doc("<string_no>", ""),
            ],
        }
    }
}
| 22.914286 | 119 | 0.567332 |
fe175144ec08e37031ca9d9ca09b274512b2ee38 | 553 | mod app_store;
mod deadmanswitch;
mod dns;
mod http;
#[cfg(feature = "ping")]
mod ping;
mod play_store;
mod tcp;
mod tls;
mod udp;
mod unsupported;
mod whois;
#[cfg(feature = "ping")]
pub use self::ping::Ping;
pub use self::{
app_store::AppStore,
deadmanswitch::DeadManSwitch,
dns::{Dns, DnsRecord},
http::{Http, HttpHeaders},
play_store::PlayStore,
tcp::Tcp,
tls::Tls,
udp::Udp,
unsupported::Unsupported,
whois::Whois,
};
/// Metadata every check specification exposes for display/reporting purposes.
pub trait SpecMeta {
    /// Human-readable name of the check type.
    fn name(&self) -> &'static str;
    /// Label/value pairs describing the configured check parameters.
    fn fields(&self) -> Vec<(&'static str, String)>;
}
| 16.757576 | 50 | 0.665461 |
e2df35c298112bf6b9039021b1132920ded68051 | 299 | mod lexer;
mod parser;
/// Lex and parse `source`, printing each parsed expression (debug form) to
/// stdout. Parse errors are reported on stderr instead of stdout so they do
/// not pollute normal output.
pub fn run(source: &str) {
    let lexer = lexer::lexer();
    let iter = lexer.src_iter(source);
    let ast = parser::Parser::parse(iter);
    match ast {
        Ok(exprs) => {
            for expr in exprs {
                println!("{:?}\n", expr)
            }
        }
        // Errors previously went to stdout via println!; stderr is the
        // conventional stream for diagnostics.
        Err(err) => eprintln!("{:?}", err),
    }
}
| 21.357143 | 67 | 0.541806 |
f4c8292f81aaaf3544f2ef67c3e1545621ed00a6 | 787 | use displaydoc::Display;
use std::io;
use thiserror::Error;
// NOTE: the `///` doc comments below are *semantic*: via `displaydoc::Display`
// they become the Display format strings for each variant. Do not edit them
// without intending to change the rendered error messages.
#[derive(Debug, Error, Display)]
pub enum PakError {
    /// error: {0}
    CustomError(String),
    /// io-error: {0}
    IoError(io::Error),
    /// this is not a pakken project.
    NotAProject,
    /// could not read project file
    ProjectReadError,
    /// serialization error: {0}
    SerializationError(ron::ser::Error),
    /// could not locate the target `{0}`
    TargetNotFound(String),
    /// parser error: {0}
    ParserError(String),
}
impl From<io::Error> for PakError {
fn from(err: io::Error) -> Self { PakError::IoError(err) }
}
impl From<ron::ser::Error> for PakError {
fn from(err: ron::ser::Error) -> Self { PakError::SerializationError(err) }
}
pub type PakResult<T> = Result<T, PakError>;
| 24.59375 | 79 | 0.636595 |
56e058bfab9953dea9d63fa286c39614320b72a6 | 7,307 | // Copyright 2017-2019 `multipart-async` Crate Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::error::Error;
use std::future::Future;
use std::io::{Cursor};
use std::path::Path;
use std::pin::Pin;
use std::task::{Context, Poll};
use futures_core::Stream;
use futures_util::TryStreamExt;
use http::header::HeaderName;
use mime::Mime;
use tokio::io::{self, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
/// Streams a `multipart/form-data` body to an arbitrary writer `W`.
pub struct MultipartWriter<W> {
    // Destination the encoded multipart body is written to.
    inner: W,
    // Boundary string separating the parts (without the leading `--`).
    boundary: String,
    // True once any field has been written; `finish()` only emits the
    // trailing boundary when this is set.
    data_written: bool,
}
impl<W> MultipartWriter<W> {
    /// Wrap `inner` as the destination of a multipart body using `boundary`.
    pub(crate) fn new(inner: W, boundary: String) -> Self {
        MultipartWriter {
            inner,
            boundary,
            data_written: false,
        }
    }

    /// Render the `--boundary` line plus `Content-Disposition` (and optional
    /// `Content-Type`) headers for one field, terminated by a blank line.
    fn get_field_header(
        &self,
        name: &str,
        filename: Option<&str>,
        content_type: Option<&Mime>,
    ) -> String {
        use std::fmt::Write;

        let mut header = format!(
            "--{}\r\nContent-Disposition: form-data; name=\"{}\"",
            self.boundary, name
        );

        if let Some(filename) = filename {
            write!(header, "; filename=\"{}\"", filename).unwrap();
        }

        if let Some(content_type) = content_type {
            // Writing to a String cannot fail; unwrap also consumes the
            // `fmt::Result` that was previously ignored (unused-Result warning).
            write!(header, "\r\nContent-Type: {}", content_type).unwrap();
        }

        header.push_str("\r\n\r\n");

        header
    }

    /// Borrow the underlying writer.
    pub fn get_ref(&self) -> &W {
        &self.inner
    }

    /// Mutably borrow the underlying writer.
    pub fn get_mut(&mut self) -> &mut W {
        &mut self.inner
    }

    /// Consume `self`, returning the underlying writer.
    pub fn into_inner(self) -> W {
        self.inner
    }
}
impl<W: AsyncWrite + Unpin> MultipartWriter<W> {
    /// Write the header block for one field and mark the body as non-empty.
    async fn write_field_header(
        &mut self,
        name: &str,
        filename: Option<&str>,
        content_type: Option<&Mime>,
    ) -> io::Result<()> {
        let mut header = Cursor::new(self.get_field_header(name, filename, content_type));
        io::copy(&mut header, &mut self.inner).await?;
        self.data_written = true;
        Ok(())
    }

    /// Write a field of any type to the output. (Method for taking `AsyncRead`).
    ///
    /// If `content_type` is not set, the server assumes `Content-Type: text/plain`
    /// ([RFC 7578 Section 4.4][7578-4.4]).
    ///
    /// Typically, if `filename` and `content_type` are omitted, the server will interpret it as a
    /// non-file text field like with `application/x-www-form-urlencoded` form fields. Unless
    /// a charset is manually specified, the server *should* assume the text encoding is UTF-8.
    ///
    /// If `filename` is provided even if `content_type` is not set, it may cause the
    /// server to interpret the field as a text file instead of a text field.
    ///
    /// If you want the server to interpret a field as a file regardless of type or filename,
    /// pass a `content_type` of `mime::APPLICATION_OCTET_STREAM`.
    ///
    /// [7578-4.4]: https://tools.ietf.org/html/rfc7578#section-4.4
    pub async fn write_field<R: AsyncRead + Unpin>(
        &mut self,
        name: &str,
        filename: Option<&str>,
        content_type: Option<&Mime>,
        mut contents: R,
    ) -> io::Result<&mut Self> {
        self.write_field_header(name, filename, content_type)
            .await?;
        io::copy(&mut contents, &mut self.inner).await?;
        self.inner.write_all(b"\r\n").await?;
        Ok(self)
    }

    /// Like [`.write_field()`](#method.write_field) but takes a `Stream`.
    /// See that method for details on these parameters.
    ///
    /// Errors from the stream will be wrapped as `io::ErrorKind::Other`.
    pub async fn write_stream<B, E, S>(
        &mut self,
        name: &str,
        filename: Option<&str>,
        content_type: Option<&Mime>,
        // `mut` removed: the parameter is immediately shadowed below.
        contents: S,
    ) -> io::Result<&mut Self>
    where
        B: AsRef<[u8]>,
        E: Into<Box<dyn Error + Send + Sync>>,
        S: Stream<Item = Result<B, E>> + Unpin,
    {
        // NOTE(review): the field header is not written here, unlike
        // `write_field` — confirm callers invoke `write_field_header`
        // separately or that this is intentional.
        let mut contents = contents.map_err(|e| io::Error::new(io::ErrorKind::Other, e));
        while let Some(buf) = contents.try_next().await? {
            self.inner.write_all(buf.as_ref()).await?;
        }
        self.inner.write_all(b"\r\n").await?;
        Ok(self)
    }

    /// Open a file for reading and copy it as a field to the output, inferring the filename
    /// and content-type from the path.
    ///
    /// If no content-type is known for the path extension or there is no extension,
    /// `application/octet-stream` is assumed to ensure the server interprets this field as a file.
    ///
    /// If you want to override the filename or content-type, use
    /// [`.write_field()`](#method.write_field) instead.
    #[cfg(feature = "tokio-fs")]
    pub async fn write_file<P: AsRef<Path>>(
        &mut self,
        name: &str,
        path: P,
    ) -> io::Result<&mut Self> {
        let path = path.as_ref();
        let filename = path.file_name().and_then(|s| s.to_str());
        let content_type = mime_guess::from_path(path).first_or_octet_stream();
        // Both calls below are async and were previously not awaited, which
        // cannot type-check in this `io::Result`-returning async fn.
        let file = tokio_fs::File::open(path).await?;
        self.write_field(name, filename, Some(&content_type), file)
            .await
    }

    /// Write a plain text field to the output.
    ///
    /// The server must assume `Content-Type: text/plain` ([RFC 7578 Section 4.4][7578-4.4]).
    /// Typically, the server will interpret it as a non-file text field like with
    /// `application/x-www-form-urlencoded` form fields.
    ///
    /// If you want to pass a string but still set the filename and/or content type,
    /// convert it to bytes with `.as_bytes()` and pass it to [`.write_field()`](#method.write_field)
    /// instead, as byte slices implement `AsyncRead`.
    pub async fn write_text(&mut self, name: &str, text: &str) -> io::Result<&mut Self> {
        self.write_field(name, None, None, text.as_bytes()).await
    }

    /// Complete the `multipart/form-data` request.
    ///
    /// Writes the trailing boundary and flushes the output.
    ///
    /// The request should be closed at this point as the server must ignore all data outside
    /// the multipart body.
    pub async fn finish(&mut self) -> io::Result<()> {
        if self.data_written {
            self.inner.write_all(b"--").await?;
            self.inner.write_all(self.boundary.as_bytes()).await?;
            // trailing newline isn't necessary per the spec but some clients are expecting it
            // https://github.com/actix/actix-web/issues/598
            self.inner.write_all(b"--\r\n").await?;
        }
        self.inner.flush().await?;
        Ok(())
    }
}
#[cfg(test)]
#[tokio::test]
// Golden test: a single text field produces exactly one part followed by the
// trailing boundary, with CRLF line endings throughout.
async fn test_multipart_writer_one_text_field() -> io::Result<()> {
    let mut writer = MultipartWriter {
        inner: Vec::<u8>::new(),
        boundary: "boundary".to_string(),
        data_written: false,
    };

    writer.write_text("hello", "world!").await?.finish().await?;

    assert_eq!(
        writer.inner,
        &b"--boundary\r\n\
           Content-Disposition: form-data; name=\"hello\"\r\n\r\n\
           world!\r\n\
           --boundary--\r\n"[..]
    );
    Ok(())
}
| 33.213636 | 101 | 0.595867 |
eb8973c482933b10fb1d667b37a640db6509a17a | 3,718 | use bytesize::ByteSize;
use failure::Error;
#[macro_use]
extern crate log;
use futures::future::join_all;
use loqui_bench_common::{configure_logging, make_socket_address};
use loqui_client::{Client, Config};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::task::spawn;
use tokio::time::delay_for;
/// Shared benchmark counters, updated concurrently by the worker tasks and
/// read (and partially reset) once a second by the logger task.
#[derive(Default)]
struct State {
    // Cumulative count of successful requests (never reset).
    request_count: AtomicUsize,
    // Cumulative count of failed or mismatched responses (never reset).
    failed_requests: AtomicUsize,
    // Requests currently awaiting a response.
    in_flight: AtomicUsize,
    // Maximum single-request latency (µs) within the current log interval;
    // reset by the logger each second.
    max_age: AtomicUsize,
    // Sum of request latencies (µs) within the current log interval;
    // reset by the logger each second.
    request_time: AtomicUsize,
}
/// Build the fixed request payload sent on every benchmark iteration.
fn make_message() -> Vec<u8> {
    "hello world".as_bytes().to_vec()
}
/// Issue a single request and record its outcome and latency in `state`.
async fn do_work(client: Arc<Client>, state: Arc<State>) {
    let message = make_message();
    let start = Instant::now();
    state.in_flight.fetch_add(1, Ordering::SeqCst);
    match client.request(message).await {
        Ok(payload) => {
            // A response with unexpected content counts as a failure.
            if &payload[..] != b"hello world" {
                state.failed_requests.fetch_add(1, Ordering::SeqCst);
            } else {
                state.request_count.fetch_add(1, Ordering::SeqCst);
            }
        }
        Err(e) => {
            state.failed_requests.fetch_add(1, Ordering::SeqCst);
            dbg!(e);
        }
    }
    // Latency is recorded for every request, successful or not.
    let age = Instant::now().duration_since(start).as_micros() as usize;
    state.request_time.fetch_add(age, Ordering::SeqCst);
    if age > state.max_age.load(Ordering::SeqCst) {
        state.max_age.store(age, Ordering::SeqCst)
    }
    state.in_flight.fetch_sub(1, Ordering::SeqCst);
}
/// Issue requests back-to-back forever, recording stats in `state`.
async fn work_loop(client: Arc<Client>, state: Arc<State>) {
    loop {
        do_work(Arc::clone(&client), Arc::clone(&state)).await;
    }
}
/// Once a second, log throughput and latency stats and reset the
/// per-interval counters (`max_age`, `request_time`).
async fn log_loop(state: Arc<State>) {
    let mut last_request_count = 0;
    let mut last = Instant::now();
    loop {
        delay_for(Duration::from_secs(1)).await;
        let now = Instant::now();
        let elapsed = now.duration_since(last).as_millis() as f64 / 1000.0;
        let request_count = state.request_count.load(Ordering::SeqCst);
        let req_sec = (request_count - last_request_count) as f64 / elapsed;
        // NOTE(review): `request_time` accumulates latency for *all* requests
        // (do_work records it on failures too) while the divisor counts only
        // new *successful* requests, so avg_time is skewed when requests fail.
        let avg_time = if request_count > last_request_count {
            state.request_time.load(Ordering::SeqCst) / (request_count - last_request_count)
        } else {
            0
        };
        let failed_requests = state.failed_requests.load(Ordering::SeqCst);
        let in_flight = state.in_flight.load(Ordering::SeqCst);
        let max_age = state.max_age.load(Ordering::SeqCst);
        info!(
            "{} total requests ({}/sec). last log {} sec ago. {} failed, {} in flight, {} µs max, {} µs avg response time",
            request_count, req_sec, elapsed, failed_requests, in_flight, max_age, avg_time
        );
        last_request_count = request_count;
        last = now;
        // Reset the per-interval aggregates for the next window.
        state.max_age.store(0, Ordering::SeqCst);
        state.request_time.store(0, Ordering::SeqCst);
    }
}
/// Benchmark entry point: connect one client and drive it with 100 concurrent
/// request loops while a logger task reports stats once a second.
#[tokio::main]
async fn main() -> Result<(), Error> {
    let state = Arc::new(State::default());
    let log_state = state.clone();
    configure_logging()?;
    // The logger runs forever in the background.
    spawn(log_loop(log_state.clone()));
    let config = Config {
        max_payload_size: ByteSize::kb(5000),
        request_timeout: Duration::from_secs(5),
        handshake_timeout: Duration::from_secs(5),
        supported_encodings: &["msgpack", "identity"],
    };
    let client = Arc::new(
        Client::start_connect(make_socket_address(), config)
            .await
            .expect("Failed to connect"),
    );
    client.await_ready().await.expect("Ready failed");
    // 100 concurrent workers share the single client connection.
    let mut work_futures = vec![];
    for _ in 0..100 {
        work_futures.push(work_loop(client.clone(), state.clone()));
    }
    // The work loops never return, so this awaits forever.
    join_all(work_futures).await;
    Ok(())
}
| 31.508475 | 123 | 0.625336 |
7affad8da8aa917fbbe312792d0d227b66ac4663 | 10,336 | use futures03::TryStreamExt;
use graph::parking_lot::Mutex;
use graph::tokio_stream::wrappers::ReceiverStream;
use std::collections::BTreeSet;
use std::sync::{atomic::Ordering, Arc, RwLock};
use std::{collections::HashMap, sync::atomic::AtomicUsize};
use tokio::sync::mpsc::{channel, Sender};
use tokio::sync::watch;
use uuid::Uuid;
use crate::notification_listener::{NotificationListener, SafeChannelName};
use graph::components::store::{SubscriptionManager as SubscriptionManagerTrait, UnitStream};
use graph::prelude::serde_json;
use graph::{prelude::*, tokio_stream};
/// Owns the Postgres LISTEN connection that produces `StoreEvent`s; dropping
/// it tears the notification channel down.
pub struct StoreEventListener {
    // Kept alive for the lifetime of the event stream.
    notification_listener: NotificationListener,
}
impl StoreEventListener {
pub fn new(
logger: Logger,
postgres_url: String,
registry: Arc<impl MetricsRegistry>,
) -> (Self, Box<dyn Stream<Item = StoreEvent, Error = ()> + Send>) {
let channel = SafeChannelName::i_promise_this_is_safe("store_events");
let (notification_listener, receiver) =
NotificationListener::new(&logger, postgres_url, channel.clone());
let counter = registry
.global_counter_vec(
"notification_queue_recvd",
"Number of messages received through Postgres LISTEN",
vec!["channel", "network"].as_slice(),
)
.unwrap()
.with_label_values(&[channel.as_str(), "none"]);
let event_stream = Box::new(
ReceiverStream::new(receiver)
.map(Result::<_, ()>::Ok)
.compat()
.filter_map(move |notification| {
// When graph-node is starting up, it is possible that
// Postgres still has old messages queued up that we
// can't decode anymore. It is safe to skip them; once
// We've seen 10 valid messages, we can assume that
// whatever old messages Postgres had queued have been
// cleared. Seeing an invalid message after that
// definitely indicates trouble.
let num_valid = AtomicUsize::new(0);
serde_json::from_value(notification.payload.clone()).map_or_else(
|_err| {
error!(
&logger,
"invalid store event received from database: {:?}",
notification.payload
);
if num_valid.load(Ordering::SeqCst) > 10 {
panic!(
"invalid store event received from database: {:?}",
notification.payload
);
}
None
},
|change| {
num_valid.fetch_add(1, Ordering::SeqCst);
counter.inc();
Some(change)
},
)
}),
);
(
StoreEventListener {
notification_listener,
},
event_stream,
)
}
pub fn start(&mut self) {
self.notification_listener.start()
}
}
/// Thin wrapper around a `tokio::sync::watch` channel that keeps one internal
/// receiver alive, so sends never fail and subscriber counts can be tracked.
struct Watcher<T> {
    sender: Arc<watch::Sender<T>>,
    // Internal receiver: never read, only held to keep the channel open.
    receiver: watch::Receiver<T>,
}
impl<T: Clone + Debug + Send + Sync + 'static> Watcher<T> {
    /// Create a watch channel seeded with `init`.
    fn new(init: T) -> Self {
        let (sender, receiver) = watch::channel(init);
        Watcher {
            sender: Arc::new(sender),
            receiver,
        }
    }

    /// Broadcast `v` to all subscribers.
    fn send(&self, v: T) {
        // Unwrap: `self` holds a receiver.
        self.sender.send(v).unwrap()
    }

    /// Subscribe: each call clones the internal receiver into a new stream
    /// that yields the current value and every subsequent update.
    fn stream(&self) -> Box<dyn futures03::Stream<Item = T> + Unpin + Send + Sync> {
        Box::new(tokio_stream::wrappers::WatchStream::new(
            self.receiver.clone(),
        ))
    }

    /// Outstanding receivers returned from `Self::stream`.
    fn receiver_count(&self) -> usize {
        // Do not count the internal receiver.
        self.sender.receiver_count() - 1
    }
}
/// Manage subscriptions to the `StoreEvent` stream. Keep a list of
/// currently active subscribers and forward new events to each of them
pub struct SubscriptionManager {
    // These are more efficient since only one entry is stored per filter.
    subscriptions_no_payload: Arc<Mutex<HashMap<BTreeSet<SubscriptionFilter>, Watcher<()>>>>,
    // Payload-carrying subscriptions, keyed by a per-subscription UUID;
    // each entry holds the filter set and the channel to the subscriber.
    subscriptions:
        Arc<RwLock<HashMap<String, (Arc<BTreeSet<SubscriptionFilter>>, Sender<Arc<StoreEvent>>)>>>,

    /// Keep the notification listener alive
    listener: StoreEventListener,
}
impl SubscriptionManager {
    /// Build the manager: wire the Postgres event stream into the fan-out
    /// loop, start the stale-subscription sweeper, then start listening.
    pub fn new(logger: Logger, postgres_url: String, registry: Arc<impl MetricsRegistry>) -> Self {
        let (listener, store_events) = StoreEventListener::new(logger, postgres_url, registry);

        let mut manager = SubscriptionManager {
            subscriptions_no_payload: Arc::new(Mutex::new(HashMap::new())),
            subscriptions: Arc::new(RwLock::new(HashMap::new())),
            listener,
        };

        // Deal with store subscriptions
        manager.handle_store_events(store_events);
        manager.periodically_clean_up_stale_subscriptions();
        // Only start the listener after the fan-out loop is in place.
        manager.listener.start();

        manager
    }

    /// Receive store events from Postgres and send them to all active
    /// subscriptions. Detect stale subscriptions in the process and
    /// close them.
    fn handle_store_events(
        &self,
        store_events: Box<dyn Stream<Item = StoreEvent, Error = ()> + Send>,
    ) {
        let subscriptions = self.subscriptions.cheap_clone();
        let subscriptions_no_payload = self.subscriptions_no_payload.cheap_clone();
        let mut store_events = store_events.compat();

        // This channel is constantly receiving things and there are locks involved,
        // so it's best to use a blocking task.
        graph::spawn_blocking(async move {
            while let Some(Ok(event)) = store_events.next().await {
                let event = Arc::new(event);

                // Send to `subscriptions`.
                {
                    // Clone the map under a short read lock so the lock is
                    // not held across the awaits below.
                    let senders = subscriptions.read().unwrap().clone();

                    // Write change to all matching subscription streams; remove subscriptions
                    // whose receiving end has been dropped
                    for (id, (_, sender)) in senders
                        .iter()
                        .filter(|(_, (filter, _))| event.matches(filter))
                    {
                        if sender.send(event.cheap_clone()).await.is_err() {
                            // Receiver was dropped
                            subscriptions.write().unwrap().remove(id);
                        }
                    }
                }

                // Send to `subscriptions_no_payload`.
                {
                    let watchers = subscriptions_no_payload.lock();

                    // Write change to all matching subscription streams
                    for (_, watcher) in watchers.iter().filter(|(filter, _)| event.matches(filter))
                    {
                        watcher.send(());
                    }
                }
            }
        });
    }

    /// Every 5 seconds, drop subscriptions whose subscribers have gone away
    /// (closed channel / zero outstanding watch receivers).
    fn periodically_clean_up_stale_subscriptions(&self) {
        let subscriptions = self.subscriptions.cheap_clone();
        let subscriptions_no_payload = self.subscriptions_no_payload.cheap_clone();

        // Clean up stale subscriptions every 5s
        graph::spawn(async move {
            let mut interval = tokio::time::interval(Duration::from_secs(5));
            loop {
                interval.tick().await;

                // Cleanup `subscriptions`.
                {
                    let mut subscriptions = subscriptions.write().unwrap();

                    // Obtain IDs of subscriptions whose receiving end has gone
                    let stale_ids = subscriptions
                        .iter_mut()
                        .filter_map(|(id, (_, sender))| match sender.is_closed() {
                            true => Some(id.clone()),
                            false => None,
                        })
                        .collect::<Vec<_>>();

                    // Remove all stale subscriptions
                    for id in stale_ids {
                        subscriptions.remove(&id);
                    }
                }

                // Cleanup `subscriptions_no_payload`.
                {
                    let mut subscriptions = subscriptions_no_payload.lock();

                    // Obtain IDs of subscriptions whose receiving end has gone
                    let stale_ids = subscriptions
                        .iter_mut()
                        .filter_map(|(id, watcher)| match watcher.receiver_count() == 0 {
                            true => Some(id.clone()),
                            false => None,
                        })
                        .collect::<Vec<_>>();

                    // Remove all stale subscriptions
                    for id in stale_ids {
                        subscriptions.remove(&id);
                    }
                }
            }
        });
    }
}
impl SubscriptionManagerTrait for SubscriptionManager {
    /// Register a payload-carrying subscription for the given filters and
    /// return the filtered event stream.
    fn subscribe(&self, entities: BTreeSet<SubscriptionFilter>) -> StoreEventStreamBox {
        let id = Uuid::new_v4().to_string();

        // Prepare the new subscription by creating a channel and a subscription object
        let (sender, receiver) = channel(100);

        // Add the new subscription
        self.subscriptions
            .write()
            .unwrap()
            .insert(id, (Arc::new(entities.clone()), sender));

        // Return the subscription ID and entity change stream
        StoreEventStream::new(Box::new(ReceiverStream::new(receiver).map(Ok).compat()))
            .filter_by_entities(entities)
    }

    /// Register (or reuse) a payload-less watch subscription for the given
    /// filters; identical filter sets share a single `Watcher`.
    fn subscribe_no_payload(&self, entities: BTreeSet<SubscriptionFilter>) -> UnitStream {
        self.subscriptions_no_payload
            .lock()
            .entry(entities)
            .or_insert_with(|| Watcher::new(()))
            .stream()
    }
}
| 37.046595 | 99 | 0.527283 |
d5bb2fa9e990ca49eed63153a848bca7fabd16ce | 1,149 | use std::{
error::Error,
fs::File,
io::{BufRead, BufReader},
path::PathBuf,
};
mod parser;
use parser::{parse, ParseError};
/// Evaluate every expression with part-1 precedence rules and print the sum.
fn part1(lines: impl Iterator<Item = impl AsRef<str>>) -> Result<(), ParseError> {
    let result = lines.into_iter().try_fold(0, |total, line| {
        parse(line.as_ref(), false).map(|value| total + value)
    })?;
    println!("Part 1: result = {}", result);
    Ok(())
}
/// Evaluate every expression with part-2 precedence rules and print the sum.
fn part2(lines: impl Iterator<Item = impl AsRef<str>>) -> Result<(), ParseError> {
    let result = lines.into_iter().try_fold(0, |total, line| {
        parse(line.as_ref(), true).map(|value| total + value)
    })?;
    println!("Part 2: result = {}", result);
    Ok(())
}
/// Read the puzzle input from `data/day18/input.txt` and run both parts.
fn run() -> Result<(), Box<dyn Error>> {
    let path: PathBuf = ["data", "day18", "input.txt"].iter().collect();
    let reader = BufReader::new(File::open(path)?);
    // Materialize the lines so both parts can iterate over them.
    let lines: Vec<String> = reader.lines().collect::<Result<_, _>>()?;
    part1(lines.iter())?;
    part2(lines.iter())?;
    Ok(())
}
fn main() {
    // Report any failure on stderr and map it to a non-zero exit code.
    let code = match run() {
        Ok(()) => 0,
        Err(e) => {
            eprintln!("Error occurred: {}", e);
            1
        }
    };
    std::process::exit(code);
}
| 20.517857 | 82 | 0.497824 |
use std::fs::File;
use std::io::{BufRead, BufReader};
type CoordinateUnit = f32;
type Coordinates = (CoordinateUnit, CoordinateUnit);
// --------------------- Operation ---------------------
// One navigation instruction kind from the puzzle input.
#[derive(Copy, Clone)]
enum Operation {
    MoveNorth,
    MoveSouth,
    MoveEast,
    MoveWest,
    RotateLeft,
    RotateRight,
    MoveForward,
}
// --------------------- Ferry ---------------------
// Part 1 model: the ship itself turns and sails along its own heading.
struct Ferry {
    // Assuming East is direction 0; the heading is kept in degrees,
    // counter-clockwise positive.
    position: Coordinates,
    direction: CoordinateUnit,
}
impl Ferry {
    // Start at the origin, facing East.
    pub fn new() -> Ferry {
        Ferry { position: (0.0, 0.0), direction: 0.0 }
    }
    fn get_position(&self) -> Coordinates { self.position }
    // Absolute translations; the heading is not involved.
    fn move_north(&mut self, value: CoordinateUnit) { self.position.1 += value }
    fn move_south(&mut self, value: CoordinateUnit) { self.position.1 -= value }
    fn move_east(&mut self, value: CoordinateUnit) { self.position.0 += value }
    fn move_west(&mut self, value: CoordinateUnit) { self.position.0 -= value }
    // Turns only change the heading, never the position.
    fn rotate_left(&mut self, value: CoordinateUnit) { self.direction += value }
    fn rotate_right(&mut self, value: CoordinateUnit) { self.direction -= value }
    // Advance `value` units along the current heading.
    fn move_forward(&mut self, value: CoordinateUnit) {
        let heading = self.direction.to_radians();
        self.position.0 += value * heading.cos();
        self.position.1 += value * heading.sin();
    }
    // Dispatch a decoded instruction to the matching movement.
    fn run_operation(&mut self, operation: Operation, value: CoordinateUnit) {
        match operation {
            Operation::MoveNorth => self.move_north(value),
            Operation::MoveSouth => self.move_south(value),
            Operation::MoveEast => self.move_east(value),
            Operation::MoveWest => self.move_west(value),
            Operation::RotateLeft => self.rotate_left(value),
            Operation::RotateRight => self.rotate_right(value),
            Operation::MoveForward => self.move_forward(value),
        }
    }
}
// Map a single-letter instruction code to its `Operation`.
// Panics on any unrecognized code.
fn get_operation_to_code(code: &str) -> Operation {
    match code {
        "N" => Operation::MoveNorth,
        "S" => Operation::MoveSouth,
        "E" => Operation::MoveEast,
        "W" => Operation::MoveWest,
        "L" => Operation::RotateLeft,
        "R" => Operation::RotateRight,
        "F" => Operation::MoveForward,
        _ => panic!("Option not recognized!"),
    }
}
// --------------------- Waypoint ---------------------
// Part 2 model: the ship steers toward a waypoint whose offset is kept
// relative to the ship.
struct Waypoint {
    // Assuming East is direction 0
    position : Coordinates, // absolute ship position
    waypoint : Coordinates, // waypoint offset, relative to the ship
}
impl Waypoint {
    // Ship at the origin; waypoint starts 10 units East, 1 unit North.
    pub fn new() -> Waypoint {
        Waypoint {
            position : (0.0, 0.0),
            waypoint : (10.0, 1.0),
        }
    }
    fn get_position(&self) -> Coordinates { self.position }
    // N/S/E/W move the *waypoint*, not the ship.
    fn move_north(&mut self, value : CoordinateUnit) { self.waypoint.1 += value }
    fn move_south(&mut self, value : CoordinateUnit) { self.waypoint.1 -= value }
    fn move_east(&mut self, value : CoordinateUnit) { self.waypoint.0 += value }
    fn move_west(&mut self, value : CoordinateUnit) { self.waypoint.0 -= value }
    // Rotate the waypoint `degrees` counter-clockwise around the ship
    // (pass a negative value for a clockwise rotation). Shared by
    // rotate_left/rotate_right, which previously duplicated this code.
    fn rotate(&mut self, degrees : CoordinateUnit) {
        let east_vector : Coordinates = (1.0, 0.0);
        // Current polar angle of the waypoint, measured from due East.
        let angle : CoordinateUnit = (self.waypoint.1.atan2(self.waypoint.0) - east_vector.1.atan2(east_vector.0)).to_degrees();
        let radius : CoordinateUnit = (self.waypoint.0.powf(2.0) + self.waypoint.1.powf(2.0)).sqrt();
        let angle : CoordinateUnit = angle + degrees;
        self.waypoint = (radius * angle.to_radians().cos(), radius * angle.to_radians().sin());
    }
    fn rotate_left(&mut self, value : CoordinateUnit) { self.rotate(value) }
    fn rotate_right(&mut self, value : CoordinateUnit) { self.rotate(-value) }
    // Sailing forward moves the ship toward the waypoint `value` times.
    fn move_forward(&mut self, value : CoordinateUnit) {
        self.position.0 += value * self.waypoint.0;
        self.position.1 += value * self.waypoint.1;
    }
    // Dispatch a decoded instruction to the matching movement.
    fn run_operation(&mut self, operation : Operation, value : CoordinateUnit) {
        match operation {
            Operation::MoveNorth => self.move_north(value),
            Operation::MoveSouth => self.move_south(value),
            Operation::MoveEast => self.move_east(value),
            Operation::MoveWest => self.move_west(value),
            Operation::RotateLeft => self.rotate_left(value),
            Operation::RotateRight => self.rotate_right(value),
            Operation::MoveForward => self.move_forward(value),
        }
    }
}
fn main() {
    let filename = "src/input.txt";
    let reader = BufReader::new(File::open(filename).unwrap());
    // Slurp the whole input; abort on any read error.
    let data: Vec<String> = reader.lines().collect::<Result<_, _>>().unwrap();
    let mut ferry = Ferry::new();
    let mut waypoint = Waypoint::new();
    for line in data.iter() {
        // First character is the instruction code, the remainder its argument.
        let operation = get_operation_to_code(&line[..1]);
        let value: CoordinateUnit = match line[1..].parse() {
            Ok(value) => value,
            Err(e) => panic!("{}", e),
        };
        // Update Ferry (Part 1)
        ferry.run_operation(operation, value);
        // Update Waypoint (Part 2)
        waypoint.run_operation(operation, value);
    }
    let (x, y) = ferry.get_position();
    println!("Current ferry position (part 1): ({}, {})", x, y);
    let (x, y) = waypoint.get_position();
    println!("Current ferry position (part 2): ({}, {})", x, y);
}
| 36.391813 | 128 | 0.605817 |
//! HTTP Client
//!
//! There are two levels of APIs provided for construct HTTP clients:
//!
//! - The higher-level [`Client`](Client) type.
//! - The lower-level [`conn`](client::conn) module.
//!
//! # Client
//!
//! The [`Client`](Client) is the main way to send HTTP requests to a server.
//! The default `Client` provides these things on top of the lower-level API:
//!
//! - A default **connector**, able to resolve hostnames and connect to
//! destinations over plain-text TCP.
//! - A **pool** of existing connections, allowing better performance when
//! making multiple requests to the same hostname.
//! - Automatic setting of the `Host` header, based on the request `Uri`.
//! - Automatic request **retries** when a pooled connection is closed by the
//! server before any bytes have been written.
//!
//! Many of these features can configured, by making use of
//! [`Client::builder`](Client::builder).
//!
//! ## Example
//!
//! For a small example program simply fetching a URL, take a look at the
//! [full client example](https://github.com/hyperium/hyper/blob/master/examples/client.rs).
//!
//! ```
//! extern crate hyper;
//!
//! use hyper::{Client, Uri};
//! # #[cfg(feature = "runtime")]
//! use hyper::rt::{self, Future, Stream};
//!
//! # #[cfg(feature = "runtime")]
//! # fn fetch_httpbin() {
//! let client = Client::new();
//!
//! let fut = client
//!
//! // Make a GET /ip to 'http://httpbin.org'
//! .get(Uri::from_static("http://httpbin.org/ip"))
//!
//! // And then, if the request gets a response...
//! .and_then(|res| {
//! println!("status: {}", res.status());
//!
//! // Concatenate the body stream into a single buffer...
//! // This returns a new future, since we must stream body.
//! res.into_body().concat2()
//! })
//!
//! // And then, if reading the full body succeeds...
//! .and_then(|body| {
//! // The body is just bytes, but let's print a string...
//! let s = ::std::str::from_utf8(&body)
//! .expect("httpbin sends utf-8 JSON");
//!
//! println!("body: {}", s);
//!
//! // and_then requires we return a new Future, and it turns
//! // out that Result is a Future that is ready immediately.
//! Ok(())
//! })
//!
//! // Map any errors that might have happened...
//! .map_err(|err| {
//! println!("error: {}", err);
//! });
//!
//! // A runtime is needed to execute our asynchronous code. In order to
//! // spawn the future into the runtime, it should already have been
//! // started and running before calling this code.
//! rt::spawn(fut);
//! # }
//! # fn main () {}
//! ```
use std::fmt;
use std::mem;
use std::sync::Arc;
use std::time::Duration;
use futures::{Async, Future, Poll};
use futures::future::{self, Either, Executor};
use futures::sync::oneshot;
use http::{Method, Request, Response, Uri, Version};
use http::header::{HeaderValue, HOST};
use http::uri::Scheme;
use body::{Body, Payload};
use common::{lazy as hyper_lazy, Lazy};
use self::connect::{Alpn, Connect, Connected, Destination};
use self::pool::{Key as PoolKey, Pool, Poolable, Pooled, Reservation};
#[cfg(feature = "runtime")] pub use self::connect::HttpConnector;
pub mod conn;
pub mod connect;
pub(crate) mod dispatch;
mod pool;
#[cfg(test)]
mod tests;
/// A Client to make outgoing HTTP requests.
pub struct Client<C, B = Body> {
    // Behavioral knobs (retries, Host header, HTTP version) set by `Builder`.
    config: Config,
    // Per-connection configuration used when a new connection is handshaken.
    conn_builder: conn::Builder,
    // Shared connector used to open new transports to a `Destination`.
    connector: Arc<C>,
    // Pool of connections, keyed by "scheme://authority" (see `extract_domain`).
    pool: Pool<PoolClient<B>>,
}
#[derive(Clone, Copy, Debug)]
struct Config {
    // Retry a request that was canceled on a reused connection before any
    // bytes were written (see `retryably_send_request`).
    retry_canceled_requests: bool,
    // Automatically add a `Host` header derived from the request URI.
    set_host: bool,
    // Which HTTP version(s) this client will speak (auto vs. HTTP/2 only).
    ver: Ver,
}
#[cfg(feature = "runtime")]
impl Client<HttpConnector, Body> {
    /// Create a new Client with the default [config](Builder).
    ///
    /// # Note
    ///
    /// The default connector does **not** handle TLS. Speaking to `https`
    /// destinations will require [configuring a connector that implements
    /// TLS](https://hyper.rs/guides/client/configuration).
    #[inline]
    pub fn new() -> Client<HttpConnector, Body> {
        // All defaults from `Builder`, paired with the plain-TCP connector.
        Builder::default().build_http()
    }
}
#[cfg(feature = "runtime")]
impl Default for Client<HttpConnector, Body> {
    /// Equivalent to [`Client::new`].
    fn default() -> Client<HttpConnector, Body> {
        Client::new()
    }
}
// The `()` connector type parameter is a placeholder; the concrete connector
// is chosen later by `Builder::build`/`build_http`.
impl Client<(), Body> {
    /// Create a builder to configure a new `Client`.
    ///
    /// # Example
    ///
    /// ```
    /// # extern crate hyper;
    /// # #[cfg(feature = "runtime")]
    /// # fn run () {
    /// use hyper::Client;
    ///
    /// let client = Client::builder()
    ///     .keep_alive(true)
    ///     .http2_only(true)
    ///     .build_http();
    /// # let infer: Client<_, hyper::Body> = client;
    /// # drop(infer);
    /// # }
    /// # fn main() {}
    /// ```
    #[inline]
    pub fn builder() -> Builder {
        Builder::default()
    }
}
impl<C, B> Client<C, B>
where C: Connect + Sync + 'static,
C::Transport: 'static,
C::Future: 'static,
B: Payload + Send + 'static,
B::Data: Send,
{
    /// Send a `GET` request to the supplied `Uri`.
    ///
    /// # Note
    ///
    /// This requires that the `Payload` type have a `Default` implementation.
    /// It *should* return an "empty" version of itself, such that
    /// `Payload::is_end_stream` is `true`.
    ///
    /// # Example
    ///
    /// ```
    /// # extern crate hyper;
    /// # #[cfg(feature = "runtime")]
    /// # fn run () {
    /// use hyper::{Client, Uri};
    ///
    /// let client = Client::new();
    ///
    /// let future = client.get(Uri::from_static("http://httpbin.org/ip"));
    /// # }
    /// # fn main() {}
    /// ```
    pub fn get(&self, uri: Uri) -> ResponseFuture
    where
        B: Default,
    {
        // A GET should not carry a body; warn (but proceed) if the payload's
        // `Default` produces a non-empty stream.
        let body = B::default();
        if !body.is_end_stream() {
            warn!("default Payload used for get() does not return true for is_end_stream");
        }
        let mut req = Request::new(body);
        *req.uri_mut() = uri;
        self.request(req)
    }
    /// Send a constructed `Request` using this `Client`.
    ///
    /// # Example
    ///
    /// ```
    /// # extern crate hyper;
    /// # #[cfg(feature = "runtime")]
    /// # fn run () {
    /// use hyper::{Body, Client, Request};
    ///
    /// let client = Client::new();
    ///
    /// let req = Request::builder()
    ///     .method("POST")
    ///     .uri("http://httpbin.org/post")
    ///     .body(Body::from("Hallo!"))
    ///     .expect("request builder");
    ///
    /// let future = client.request(req);
    /// # }
    /// # fn main() {}
    /// ```
    pub fn request(&self, mut req: Request<B>) -> ResponseFuture {
        let is_http_connect = req.method() == &Method::CONNECT;
        // Reject method/version combinations this client cannot send.
        match req.version() {
            Version::HTTP_11 => (),
            Version::HTTP_10 => if is_http_connect {
                debug!("CONNECT is not allowed for HTTP/1.0");
                return ResponseFuture::new(Box::new(future::err(::Error::new_user_unsupported_request_method())));
            },
            other => if self.config.ver != Ver::Http2 {
                error!("Request has unsupported version \"{:?}\"", other);
                return ResponseFuture::new(Box::new(future::err(::Error::new_user_unsupported_version())));
            }
        };
        // The pool key is "scheme://authority", so the request URI must be
        // absolute (or authority-form for CONNECT).
        let domain = match extract_domain(req.uri_mut(), is_http_connect) {
            Ok(s) => s,
            Err(err) => {
                return ResponseFuture::new(Box::new(future::err(err)));
            }
        };
        let pool_key = Arc::new(domain.to_string());
        ResponseFuture::new(Box::new(self.retryably_send_request(req, pool_key)))
    }
    // Drives a request, transparently re-sending it whenever it is canceled
    // on a *reused* connection before any bytes were written. Retrying can be
    // disabled via `Config::retry_canceled_requests`.
    fn retryably_send_request(&self, req: Request<B>, pool_key: PoolKey) -> impl Future<Item=Response<Body>, Error=::Error> {
        let client = self.clone();
        let uri = req.uri().clone();
        let mut send_fut = client.send_request(req, pool_key.clone());
        future::poll_fn(move || loop {
            match send_fut.poll() {
                Ok(Async::Ready(resp)) => return Ok(Async::Ready(resp)),
                Ok(Async::NotReady) => return Ok(Async::NotReady),
                Err(ClientError::Normal(err)) => return Err(err),
                Err(ClientError::Canceled {
                    connection_reused,
                    mut req,
                    reason,
                }) => {
                    if !client.config.retry_canceled_requests || !connection_reused {
                        // if client disabled, don't retry
                        // a fresh connection means we definitely can't retry
                        return Err(reason);
                    }
                    trace!("unstarted request canceled, trying again (reason={:?})", reason);
                    // The request was recovered from the canceled dispatch;
                    // restore its original URI (`send_request` may have
                    // rewritten it to origin-form) before trying again.
                    *req.uri_mut() = uri.clone();
                    send_fut = client.send_request(req, pool_key.clone());
                }
            }
        })
    }
    // Checks out (or creates) a connection for `pool_key`, prepares the
    // request for the negotiated protocol (Host header, URI form), and
    // dispatches it. Errors distinguish `Canceled` (request never written,
    // possibly retryable) from `Normal` failures.
    fn send_request(&self, mut req: Request<B>, pool_key: PoolKey) -> impl Future<Item=Response<Body>, Error=ClientError<B>> {
        let conn = self.connection_for(req.uri().clone(), pool_key);
        let set_host = self.config.set_host;
        let executor = self.conn_builder.exec.clone();
        conn.and_then(move |mut pooled| {
            if pooled.is_http1() {
                // Only add a Host header if the caller didn't set one.
                if set_host {
                    let uri = req.uri().clone();
                    req
                        .headers_mut()
                        .entry(HOST)
                        .expect("HOST is always valid header name")
                        .or_insert_with(|| {
                            let hostname = uri.host().expect("authority implies host");
                            if let Some(port) = uri.port_part() {
                                let s = format!("{}:{}", hostname, port);
                                HeaderValue::from_str(&s)
                            } else {
                                HeaderValue::from_str(hostname)
                            }.expect("uri host is valid header value")
                        });
                }
                // CONNECT always sends authority-form, so check it first...
                if req.method() == &Method::CONNECT {
                    authority_form(req.uri_mut());
                } else if pooled.conn_info.is_proxied {
                    absolute_form(req.uri_mut());
                } else {
                    origin_form(req.uri_mut());
                };
            } else if req.method() == &Method::CONNECT {
                debug!("client does not support CONNECT requests over HTTP2");
                return Either::A(future::err(ClientError::Normal(::Error::new_user_unsupported_request_method())));
            }
            let fut = pooled.send_request_retryable(req)
                .map_err(ClientError::map_with_reused(pooled.is_reused()));
            // If the Connector included 'extra' info, add to Response...
            let extra_info = pooled.conn_info.extra.clone();
            let fut = fut.map(move |mut res| {
                if let Some(extra) = extra_info {
                    extra.set(&mut res);
                }
                res
            });
            // As of futures@0.1.21, there is a race condition in the mpsc
            // channel, such that sending when the receiver is closing can
            // result in the message being stuck inside the queue. It won't
            // ever notify until the Sender side is dropped.
            //
            // To counteract this, we must check if our senders 'want' channel
            // has been closed after having tried to send. If so, error out...
            if pooled.is_closed() {
                return Either::B(Either::A(fut));
            }
            Either::B(Either::B(fut
                .and_then(move |mut res| {
                    // If pooled is HTTP/2, we can toss this reference immediately.
                    //
                    // when pooled is dropped, it will try to insert back into the
                    // pool. To delay that, spawn a future that completes once the
                    // sender is ready again.
                    //
                    // This *should* only be once the related `Connection` has polled
                    // for a new request to start.
                    //
                    // It won't be ready if there is a body to stream.
                    if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() {
                        drop(pooled);
                    } else if !res.body().is_end_stream() {
                        // Keep the body alive until the connection is idle
                        // again, so the pool re-insert happens at the right time.
                        let (delayed_tx, delayed_rx) = oneshot::channel();
                        res.body_mut().delayed_eof(delayed_rx);
                        let on_idle = future::poll_fn(move || {
                            pooled.poll_ready()
                        })
                            .then(move |_| {
                                // At this point, `pooled` is dropped, and had a chance
                                // to insert into the pool (if conn was idle)
                                drop(delayed_tx);
                                Ok(())
                            });
                        if let Err(err) = executor.execute(on_idle) {
                            // This task isn't critical, so just log and ignore.
                            warn!("error spawning task to insert idle connection: {}", err);
                        }
                    } else {
                        // There's no body to delay, but the connection isn't
                        // ready yet. Only re-insert when it's ready
                        let on_idle = future::poll_fn(move || {
                            pooled.poll_ready()
                        })
                            .then(|_| Ok(()));
                        if let Err(err) = executor.execute(on_idle) {
                            // This task isn't critical, so just log and ignore.
                            warn!("error spawning task to insert idle connection: {}", err);
                        }
                    }
                    Ok(res)
                })))
        })
    }
    // Resolve a usable connection for `pool_key`, racing a pool checkout
    // against a fresh connect. NOTE: the ordering of the `select2` call is
    // load-bearing; see the comments below before reordering anything.
    fn connection_for(&self, uri: Uri, pool_key: PoolKey)
        -> impl Future<Item=Pooled<PoolClient<B>>, Error=ClientError<B>>
    {
        // This actually races 2 different futures to try to get a ready
        // connection the fastest, and to reduce connection churn.
        //
        // - If the pool has an idle connection waiting, that's used
        //   immediately.
        // - Otherwise, the Connector is asked to start connecting to
        //   the destination Uri.
        // - Meanwhile, the pool Checkout is watching to see if any other
        //   request finishes and tries to insert an idle connection.
        // - If a new connection is started, but the Checkout wins after
        //   (an idle connection becamse available first), the started
        //   connection future is spawned into the runtime to complete,
        //   and then be inserted into the pool as an idle connection.
        let checkout = self.pool.checkout(pool_key.clone());
        let connect = self.connect_to(uri, pool_key);
        let executor = self.conn_builder.exec.clone();
        checkout
            // The order of the `select` is depended on below...
            .select2(connect)
            .map(move |either| match either {
                // Checkout won, connect future may have been started or not.
                //
                // If it has, let it finish and insert back into the pool,
                // so as to not waste the socket...
                Either::A((checked_out, connecting)) => {
                    // This depends on the `select` above having the correct
                    // order, such that if the checkout future were ready
                    // immediately, the connect future will never have been
                    // started.
                    //
                    // If it *wasn't* ready yet, then the connect future will
                    // have been started...
                    if connecting.started() {
                        let bg = connecting
                            .map(|_pooled| {
                                // dropping here should just place it in
                                // the Pool for us...
                            })
                            .map_err(|err| {
                                trace!("background connect error: {}", err);
                            });
                        // An execute error here isn't important, we're just trying
                        // to prevent a waste of a socket...
                        let _ = executor.execute(bg);
                    }
                    checked_out
                },
                // Connect won, checkout can just be dropped.
                Either::B((connected, _checkout)) => {
                    connected
                },
            })
            .or_else(|either| match either {
                // Either checkout or connect could get canceled:
                //
                // 1. Connect is canceled if this is HTTP/2 and there is
                //    an outstanding HTTP/2 connecting task.
                // 2. Checkout is canceled if the pool cannot deliver an
                //    idle connection reliably.
                //
                // In both cases, we should just wait for the other future.
                Either::A((err, connecting)) => {
                    if err.is_canceled() {
                        Either::A(Either::A(connecting.map_err(ClientError::Normal)))
                    } else {
                        Either::B(future::err(ClientError::Normal(err)))
                    }
                },
                Either::B((err, checkout)) => {
                    if err.is_canceled() {
                        Either::A(Either::B(checkout.map_err(ClientError::Normal)))
                    } else {
                        Either::B(future::err(ClientError::Normal(err)))
                    }
                }
            })
    }
    // Build a lazy future that opens a brand-new connection for `pool_key`:
    // take the pool's "connecting" lock, run the connector, handshake, spawn
    // the background dispatcher, and hand back a pooled sender. Being `Lazy`,
    // none of this starts until the future is actually polled (which matters
    // for the checkout race in `connection_for`).
    fn connect_to(&self, uri: Uri, pool_key: PoolKey)
        -> impl Lazy<Item=Pooled<PoolClient<B>>, Error=::Error>
    {
        let executor = self.conn_builder.exec.clone();
        let pool = self.pool.clone();
        let mut conn_builder = self.conn_builder.clone();
        let ver = self.config.ver;
        let is_ver_h2 = ver == Ver::Http2;
        let connector = self.connector.clone();
        let dst = Destination {
            uri,
        };
        hyper_lazy(move || {
            // Try to take a "connecting lock".
            //
            // If the pool_key is for HTTP/2, and there is already a
            // connection being estabalished, then this can't take a
            // second lock. The "connect_to" future is Canceled.
            let connecting = match pool.connecting(&pool_key, ver) {
                Some(lock) => lock,
                None => {
                    let canceled = ::Error::new_canceled(Some("HTTP/2 connection in progress"));
                    return Either::B(future::err(canceled));
                }
            };
            Either::A(connector.connect(dst)
                .map_err(::Error::new_connect)
                .and_then(move |(io, connected)| {
                    // If ALPN is h2 and we aren't http2_only already,
                    // then we need to convert our pool checkout into
                    // a single HTTP2 one.
                    let connecting = if connected.alpn == Alpn::H2 && !is_ver_h2 {
                        match connecting.alpn_h2(&pool) {
                            Some(lock) => {
                                trace!("ALPN negotiated h2, updating pool");
                                lock
                            },
                            None => {
                                // Another connection has already upgraded,
                                // the pool checkout should finish up for us.
                                let canceled = ::Error::new_canceled(Some("ALPN upgraded to HTTP/2"));
                                return Either::B(future::err(canceled));
                            }
                        }
                    } else {
                        connecting
                    };
                    let is_h2 = is_ver_h2 || connected.alpn == Alpn::H2;
                    Either::A(conn_builder
                        .http2_only(is_h2)
                        .handshake(io)
                        .and_then(move |(tx, conn)| {
                            trace!("handshake complete, spawning background dispatcher task");
                            let bg = executor.execute(conn.map_err(|e| {
                                debug!("client connection error: {}", e)
                            }));
                            // This task is critical, so an execute error
                            // should be returned.
                            if let Err(err) = bg {
                                warn!("error spawning critical client task: {}", err);
                                return Either::A(future::err(err));
                            }
                            // Wait for 'conn' to ready up before we
                            // declare this tx as usable
                            Either::B(tx.when_ready())
                        })
                        .map(move |tx| {
                            pool.pooled(connecting, PoolClient {
                                conn_info: connected,
                                tx: if is_h2 {
                                    PoolTx::Http2(tx.into_http2())
                                } else {
                                    PoolTx::Http1(tx)
                                },
                            })
                        }))
                }))
        })
    }
}
impl<C, B> Clone for Client<C, B> {
    // Cheap clone: the connector is shared via `Arc`, and the pool handle
    // is cloned (presumably sharing the same underlying pool — see the
    // `pool` module). `config` is `Copy`.
    fn clone(&self) -> Client<C, B> {
        Client {
            config: self.config.clone(),
            conn_builder: self.conn_builder.clone(),
            connector: self.connector.clone(),
            pool: self.pool.clone(),
        }
    }
}
impl<C, B> fmt::Debug for Client<C, B> {
    // Intentionally opaque: prints only the type name, no internal state.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Client")
            .finish()
    }
}
/// A `Future` that will resolve to an HTTP Response.
///
/// This is returned by `Client::request` (and `Client::get`).
#[must_use = "futures do nothing unless polled"]
pub struct ResponseFuture {
    // Boxed to erase the (unnameable) combinator chain behind the request.
    inner: Box<Future<Item=Response<Body>, Error=::Error> + Send>,
}
impl ResponseFuture {
    // Internal constructor wrapping an already-boxed future.
    fn new(fut: Box<Future<Item=Response<Body>, Error=::Error> + Send>) -> Self {
        Self {
            inner: fut,
        }
    }
}
impl fmt::Debug for ResponseFuture {
    // The inner future is not `Debug`; print a fixed placeholder instead.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.pad("Future<Response>")
    }
}
impl Future for ResponseFuture {
    type Item = Response<Body>;
    type Error = ::Error;
    // Simple delegation to the boxed inner future.
    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        self.inner.poll()
    }
}
// FIXME: allow() required due to `impl Trait` leaking types to this lint
#[allow(missing_debug_implementations)]
// A pooled sender half of a dispatched connection, plus the connector's
// metadata about that connection (proxying, ALPN, extras).
struct PoolClient<B> {
    conn_info: Connected,
    tx: PoolTx<B>,
}
// Protocol-specific request sender for a pooled connection.
enum PoolTx<B> {
    Http1(conn::SendRequest<B>),
    Http2(conn::Http2SendRequest<B>),
}
impl<B> PoolClient<B> {
    // HTTP/2 senders are always considered ready (multiplexed); HTTP/1
    // readiness is delegated to the single in-flight sender.
    fn poll_ready(&mut self) -> Poll<(), ::Error> {
        match self.tx {
            PoolTx::Http1(ref mut tx) => tx.poll_ready(),
            PoolTx::Http2(_) => Ok(Async::Ready(())),
        }
    }
    fn is_http1(&self) -> bool {
        !self.is_http2()
    }
    fn is_http2(&self) -> bool {
        match self.tx {
            PoolTx::Http1(_) => false,
            PoolTx::Http2(_) => true,
        }
    }
    fn is_ready(&self) -> bool {
        match self.tx {
            PoolTx::Http1(ref tx) => tx.is_ready(),
            PoolTx::Http2(ref tx) => tx.is_ready(),
        }
    }
    fn is_closed(&self) -> bool {
        match self.tx {
            PoolTx::Http1(ref tx) => tx.is_closed(),
            PoolTx::Http2(ref tx) => tx.is_closed(),
        }
    }
}
impl<B: Payload + 'static> PoolClient<B> {
    // Dispatch a request on whichever protocol this connection speaks.
    // On failure the error may carry back the original `Request`, which is
    // what makes retrying (see `retryably_send_request`) possible.
    fn send_request_retryable(&mut self, req: Request<B>) -> impl Future<Item = Response<Body>, Error = (::Error, Option<Request<B>>)>
    where
        B: Send,
    {
        match self.tx {
            PoolTx::Http1(ref mut tx) => Either::A(tx.send_request_retryable(req)),
            PoolTx::Http2(ref mut tx) => Either::B(tx.send_request_retryable(req)),
        }
    }
}
impl<B> Poolable for PoolClient<B>
where
    B: Send + 'static,
{
    fn is_open(&self) -> bool {
        match self.tx {
            PoolTx::Http1(ref tx) => tx.is_ready(),
            PoolTx::Http2(ref tx) => tx.is_ready(),
        }
    }
    // HTTP/1 connections can serve one request at a time (Unique), while an
    // HTTP/2 sender can be cloned and shared across requests (Shared).
    fn reserve(self) -> Reservation<Self> {
        match self.tx {
            PoolTx::Http1(tx) => {
                Reservation::Unique(PoolClient {
                    conn_info: self.conn_info,
                    tx: PoolTx::Http1(tx),
                })
            },
            PoolTx::Http2(tx) => {
                let b = PoolClient {
                    conn_info: self.conn_info.clone(),
                    tx: PoolTx::Http2(tx.clone()),
                };
                let a = PoolClient {
                    conn_info: self.conn_info,
                    tx: PoolTx::Http2(tx),
                };
                Reservation::Shared(a, b)
            }
        }
    }
    fn can_share(&self) -> bool {
        self.is_http2()
    }
}
// FIXME: allow() required due to `impl Trait` leaking types to this lint
#[allow(missing_debug_implementations)]
// Internal error type used to tell "real" failures apart from cancellations
// that may be retried with the recovered `req`.
enum ClientError<B> {
    Normal(::Error),
    Canceled {
        // Whether the canceled dispatch was on a reused (pooled) connection;
        // only then is a retry considered safe.
        connection_reused: bool,
        req: Request<B>,
        reason: ::Error,
    }
}
impl<B> ClientError<B> {
    // Build a `map_err` adapter: if the dispatch handed the request back,
    // produce a (possibly retryable) `Canceled`, otherwise a `Normal` error.
    fn map_with_reused(conn_reused: bool)
        -> impl Fn((::Error, Option<Request<B>>)) -> Self
    {
        move |(err, orig_req)| {
            if let Some(req) = orig_req {
                ClientError::Canceled {
                    connection_reused: conn_reused,
                    reason: err,
                    req,
                }
            } else {
                ClientError::Normal(err)
            }
        }
    }
}
/// A marker to identify what version a pooled connection is.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
enum Ver {
    // Negotiate the version (HTTP/1 unless ALPN upgrades to h2).
    Auto,
    // HTTP/2 only (`Builder::http2_only`).
    Http2,
}
// Reduce `uri` to origin-form ("/path?query"), the shape used on the
// request line of a normal (non-proxied) HTTP/1 request.
fn origin_form(uri: &mut Uri) {
    let replacement = match uri.path_and_query() {
        // Anything other than a bare "/" keeps its path and query.
        Some(pq) if pq.as_str() != "/" => {
            let mut parts = ::http::uri::Parts::default();
            parts.path_and_query = Some(pq.clone());
            Uri::from_parts(parts).expect("path is valid uri")
        },
        // `Uri::default()` is exactly "/", which is what we want here.
        _ => {
            debug_assert!(Uri::default() == "/");
            Uri::default()
        }
    };
    *uri = replacement;
}
// Keep `uri` in absolute-form ("scheme://authority/path"), as sent to an
// HTTP proxy — except for HTTPS, which should already be tunneled.
fn absolute_form(uri: &mut Uri) {
    debug_assert!(uri.scheme_part().is_some(), "absolute_form needs a scheme");
    debug_assert!(uri.authority_part().is_some(), "absolute_form needs an authority");
    // If the URI is to HTTPS, and the connector claimed to be a proxy,
    // then it *should* have tunneled, and so we don't want to send
    // absolute-form in that case.
    if uri.scheme_part() == Some(&Scheme::HTTPS) {
        origin_form(uri);
    }
}
// Reduce `uri` to authority-form ("host:port"), the only shape allowed on
// a CONNECT request line. Any path/query is dropped (with a warning).
fn authority_form(uri: &mut Uri) {
    if log_enabled!(::log::Level::Warn) {
        if let Some(path) = uri.path_and_query() {
            // `https://hyper.rs` would parse with `/` path, don't
            // annoy people about that...
            if path != "/" {
                warn!(
                    "HTTP/1.1 CONNECT request stripping path: {:?}",
                    path
                );
            }
        }
    }
    *uri = match uri.authority_part() {
        Some(auth) => {
            let mut parts = ::http::uri::Parts::default();
            parts.authority = Some(auth.clone());
            Uri::from_parts(parts).expect("authority is valid")
        },
        None => {
            // Callers (`send_request`) only reach this for CONNECT requests,
            // which `extract_domain` has already required an authority for.
            unreachable!("authority_form with relative uri");
        }
    };
}
// Derive the pool key ("scheme://authority") from a request URI.
//
// For CONNECT requests that arrive without a scheme, a scheme is guessed
// from the port (443 => https) and written back into `uri`; any other URI
// missing scheme or authority is rejected.
fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> ::Result<String> {
    // Clone so we can inspect the parts while still mutating `uri` below.
    let uri_clone = uri.clone();
    match (uri_clone.scheme_part(), uri_clone.authority_part()) {
        (Some(scheme), Some(auth)) => {
            Ok(format!("{}://{}", scheme, auth))
        }
        (None, Some(auth)) if is_http_connect => {
            let port = auth.port_part();
            let scheme = match port.as_ref().map(|p| p.as_str()) {
                Some("443") => {
                    set_scheme(uri, Scheme::HTTPS);
                    "https"
                }
                _ => {
                    set_scheme(uri, Scheme::HTTP);
                    "http"
                },
            };
            Ok(format!("{}://{}", scheme, auth))
        },
        _ => {
            debug!("Client requires absolute-form URIs, received: {:?}", uri);
            Err(::Error::new_user_absolute_uri_required())
        }
    }
}
// Install `scheme` on a URI that has none, normalizing the path to "/".
// Debug-asserts that no scheme was already present.
fn set_scheme(uri: &mut Uri, scheme: Scheme) {
    debug_assert!(uri.scheme_part().is_none(), "set_scheme expects no existing scheme");
    // Take the old URI out (leaving a default in place) and rebuild it.
    let mut parts: ::http::uri::Parts = mem::replace(uri, Uri::default()).into();
    parts.scheme = Some(scheme);
    parts.path_and_query = Some("/".parse().expect("slash is a valid path"));
    *uri = Uri::from_parts(parts).expect("scheme is valid");
}
/// A builder to configure a new [`Client`](Client).
///
/// # Example
///
/// ```
/// # extern crate hyper;
/// # #[cfg(feature = "runtime")]
/// # fn run () {
/// use hyper::Client;
///
/// let client = Client::builder()
///     .keep_alive(true)
///     .http2_only(true)
///     .build_http();
/// # let infer: Client<_, hyper::Body> = client;
/// # drop(infer);
/// # }
/// # fn main() {}
/// ```
#[derive(Clone)]
pub struct Builder {
    // Request-level behavior (retries, Host header, HTTP version).
    client_config: Config,
    // Per-connection options, forwarded to each handshake.
    conn_builder: conn::Builder,
    // Connection-pool behavior (keep-alive, idle limits).
    pool_config: pool::Config,
}
impl Default for Builder {
    // Defaults: retries on, automatic Host header, auto HTTP version,
    // keep-alive enabled with a 90s idle timeout and no per-host idle cap.
    fn default() -> Self {
        Self {
            client_config: Config {
                retry_canceled_requests: true,
                set_host: true,
                ver: Ver::Auto,
            },
            conn_builder: conn::Builder::new(),
            pool_config: pool::Config {
                enabled: true,
                keep_alive_timeout: Some(Duration::from_secs(90)),
                max_idle_per_host: ::std::usize::MAX,
            },
        }
    }
}
impl Builder {
/// Enable or disable keep-alive mechanics.
///
/// Default is enabled.
#[inline]
pub fn keep_alive(&mut self, val: bool) -> &mut Self {
self.pool_config.enabled = val;
self
}
/// Set an optional timeout for idle sockets being kept-alive.
///
/// Pass `None` to disable timeout.
///
/// Default is 90 seconds.
#[inline]
pub fn keep_alive_timeout<D>(&mut self, val: D) -> &mut Self
where
D: Into<Option<Duration>>,
{
self.pool_config.keep_alive_timeout = val.into();
self
}
/// Set whether HTTP/1 connections should try to use vectored writes,
/// or always flatten into a single buffer.
///
/// Note that setting this to false may mean more copies of body data,
/// but may also improve performance when an IO transport doesn't
/// support vectored writes well, such as most TLS implementations.
///
/// Default is `true`.
#[inline]
pub fn http1_writev(&mut self, val: bool) -> &mut Self {
self.conn_builder.h1_writev(val);
self
}
/// Sets the exact size of the read buffer to *always* use.
///
/// Note that setting this option unsets the `http1_max_buf_size` option.
///
/// Default is an adaptive read buffer.
#[inline]
pub fn http1_read_buf_exact_size(&mut self, sz: usize) -> &mut Self {
self.conn_builder.h1_read_buf_exact_size(Some(sz));
self
}
/// Set the maximum buffer size for the connection.
///
/// Default is ~400kb.
///
/// Note that setting this option unsets the `http1_read_exact_buf_size` option.
///
/// # Panics
///
/// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum.
#[inline]
pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self {
self.conn_builder.h1_max_buf_size(max);
self
}
/// Set whether HTTP/1 connections will write header names as title case at
/// the socket level.
///
/// Note that this setting does not affect HTTP/2.
///
/// Default is false.
pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self {
self.conn_builder.h1_title_case_headers(val);
self
}
/// Set whether the connection **must** use HTTP/2.
///
/// The destination must either allow HTTP2 Prior Knowledge, or the
/// `Connect` should be configured to do use ALPN to upgrade to `h2`
/// as part of the connection process. This will not make the `Client`
/// utilize ALPN by itself.
///
/// Note that setting this to true prevents HTTP/1 from being allowed.
///
/// Default is false.
pub fn http2_only(&mut self, val: bool) -> &mut Self {
self.client_config.ver = if val {
Ver::Http2
} else {
Ver::Auto
};
self
}
/// Sets the maximum idle connection per host allowed in the pool.
///
/// Default is `usize::MAX` (no limit).
pub fn max_idle_per_host(&mut self, max_idle: usize) -> &mut Self {
self.pool_config.max_idle_per_host = max_idle;
self
}
/// Set whether to retry requests that get disrupted before ever starting
/// to write.
///
/// This means a request that is queued, and gets given an idle, reused
/// connection, and then encounters an error immediately as the idle
/// connection was found to be unusable.
///
/// When this is set to `false`, the related `ResponseFuture` would instead
/// resolve to an `Error::Cancel`.
///
/// Default is `true`.
#[inline]
pub fn retry_canceled_requests(&mut self, val: bool) -> &mut Self {
self.client_config.retry_canceled_requests = val;
self
}
/// Set whether to automatically add the `Host` header to requests.
///
/// If true, and a request does not include a `Host` header, one will be
/// added automatically, derived from the authority of the `Uri`.
///
/// Default is `true`.
#[inline]
pub fn set_host(&mut self, val: bool) -> &mut Self {
self.client_config.set_host = val;
self
}
    /// Provide an executor to execute background `Connection` tasks.
    ///
    /// The bound uses futures 0.1-style boxed futures (`Item`/`Error`
    /// associated types), matching the rest of this builder's API.
    pub fn executor<E>(&mut self, exec: E) -> &mut Self
    where
        E: Executor<Box<Future<Item=(), Error=()> + Send>> + Send + Sync + 'static,
    {
        self.conn_builder.executor(exec);
        self
    }
/// Builder a client with this configuration and the default `HttpConnector`.
#[cfg(feature = "runtime")]
pub fn build_http<B>(&self) -> Client<HttpConnector, B>
where
B: Payload + Send,
B::Data: Send,
{
let mut connector = HttpConnector::new(4);
if self.pool_config.enabled {
connector.set_keepalive(self.pool_config.keep_alive_timeout);
}
self.build(connector)
}
    /// Combine the configuration of this builder with a connector to create a `Client`.
    pub fn build<C, B>(&self, connector: C) -> Client<C, B>
    where
        C: Connect,
        C::Transport: 'static,
        C::Future: 'static,
        B: Payload + Send,
        B::Data: Send,
    {
        Client {
            // `client_config` is copied, `conn_builder` cloned: the builder
            // remains usable to build further clients afterwards.
            config: self.client_config,
            conn_builder: self.conn_builder.clone(),
            connector: Arc::new(connector),
            pool: Pool::new(self.pool_config, &self.conn_builder.exec),
        }
    }
}
impl fmt::Debug for Builder {
    /// Debug-formats the builder by listing its three configuration parts.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut dbg = f.debug_struct("Builder");
        dbg.field("client_config", &self.client_config);
        dbg.field("conn_builder", &self.conn_builder);
        dbg.field("pool_config", &self.pool_config);
        dbg.finish()
    }
}
#[cfg(test)]
mod unit_tests {
    use super::*;
    // A URI with no explicit path ("http://hyper.rs") becomes just "/".
    #[test]
    fn set_relative_uri_with_implicit_path() {
        let mut uri = "http://hyper.rs".parse().unwrap();
        origin_form(&mut uri);
        assert_eq!(uri.to_string(), "/");
    }
    // origin-form keeps only path and query, dropping scheme and authority.
    #[test]
    fn test_origin_form() {
        let mut uri = "http://hyper.rs/guides".parse().unwrap();
        origin_form(&mut uri);
        assert_eq!(uri.to_string(), "/guides");
        let mut uri = "http://hyper.rs/guides?foo=bar".parse().unwrap();
        origin_form(&mut uri);
        assert_eq!(uri.to_string(), "/guides?foo=bar");
    }
    #[test]
    fn test_absolute_form() {
        let mut uri = "http://hyper.rs/guides".parse().unwrap();
        absolute_form(&mut uri);
        assert_eq!(uri.to_string(), "http://hyper.rs/guides");
        // NOTE(review): https URIs collapse to origin-form here — presumably
        // because absolute-form is only used for plain-HTTP proxying; confirm
        // against the `absolute_form` implementation.
        let mut uri = "https://hyper.rs/guides".parse().unwrap();
        absolute_form(&mut uri);
        assert_eq!(uri.to_string(), "/guides");
    }
    // authority-form (used for CONNECT) keeps only host[:port].
    #[test]
    fn test_authority_form() {
        extern crate pretty_env_logger;
        let _ = pretty_env_logger::try_init();
        let mut uri = "http://hyper.rs".parse().unwrap();
        authority_form(&mut uri);
        assert_eq!(uri.to_string(), "hyper.rs");
        let mut uri = "hyper.rs".parse().unwrap();
        authority_form(&mut uri);
        assert_eq!(uri.to_string(), "hyper.rs");
    }
    // Without a port, the extracted domain defaults to the http scheme.
    #[test]
    fn test_extract_domain_connect_no_port() {
        let mut uri = "hyper.rs".parse().unwrap();
        let domain = extract_domain(&mut uri, true).expect("extract domain");
        assert_eq!(domain, "http://hyper.rs");
    }
}
| 34.38018 | 134 | 0.512211 |
b99b7d16208f08ee4c6eedaa1f31b20aa62c1482 | 730 | use anyhow::{
Result,
bail
};
use pyo3::Python;
fn main() -> Result<()> {
	// First CLI argument selects the experiment; dots are stripped so that
	// e.g. "1.2" becomes "12".
	let experiment = match std::env::args().nth(1) {
		Some(arg) => arg.replace(".", ""),
		None => bail!("please enter an experiment number"),
	};
	std::env::set_current_dir("run")?;
	// Runs `depla.Experiment<N>().run()` inside the Python interpreter.
	#[inline]
	fn run(python: Python, experiment: &str) -> Result<()> {
		python.import("sys")?.getattr("path")?.call_method1("append", (".",))?;
		python
			.import("depla")?
			.call_method0(&format!("Experiment{}", experiment))?
			.call_method0("run")?;
		Ok(())
	}
	Python::with_gil(|python| run(python, &experiment))?;
	Ok(())
}
| 25.172414 | 105 | 0.565753 |
f410e6fd1acbc0cd104e7178d194c5dfc9ff1ae9 | 4,121 | use crate::{
cfg::{self, CfgPrivate},
page,
sync::{
atomic::{AtomicUsize, Ordering},
lazy_static, thread_local, Mutex,
},
Pack,
};
use std::{
cell::{Cell, UnsafeCell},
collections::VecDeque,
fmt,
marker::PhantomData,
};
/// Uniquely identifies a thread.
pub(crate) struct Tid<C> {
    // Numeric thread id handed out by the global registry.
    id: usize,
    // Marker named `_not_send` holding an `UnsafeCell`, used to suppress
    // auto traits for this type; zero-sized at runtime.
    _not_send: PhantomData<UnsafeCell<()>>,
    // Ties the id to a particular slab configuration without storing a `C`.
    _cfg: PhantomData<fn(C)>,
}
// Per-thread record of this thread's assigned id (`None` until first use).
#[derive(Debug)]
struct Registration(Cell<Option<usize>>);
// Process-global source of thread ids: `next` is the never-used counter,
// `free` holds ids returned by threads that have exited.
struct Registry {
    next: AtomicUsize,
    free: Mutex<VecDeque<usize>>,
}
lazy_static! {
    // Single global registry shared by all threads in the process.
    static ref REGISTRY: Registry = Registry {
        next: AtomicUsize::new(0),
        free: Mutex::new(VecDeque::new()),
    };
}
thread_local! {
    // Each thread lazily registers itself on first access (see `Registration::current`).
    static REGISTRATION: Registration = Registration::new();
}
// === impl Tid ===
impl<C: cfg::Config> Pack<C> for Tid<C> {
    // Bit width of a packed thread id: enough bits to address MAX_SHARDS.
    const LEN: usize = C::MAX_SHARDS.trailing_zeros() as usize + 1;
    // Packed immediately above the page address bits.
    type Prev = page::Addr<C>;
    #[inline(always)]
    fn as_usize(&self) -> usize {
        self.id
    }
    #[inline(always)]
    fn from_usize(id: usize) -> Self {
        Self {
            id,
            _not_send: PhantomData,
            _cfg: PhantomData,
        }
    }
}
impl<C: cfg::Config> Tid<C> {
    /// Returns the calling thread's id, registering it on first use.
    ///
    /// `try_with` fails while the thread-local is being torn down, in which
    /// case a sentinel "poisoned" id is returned instead of panicking.
    #[inline]
    pub(crate) fn current() -> Self {
        REGISTRATION
            .try_with(Registration::current)
            .unwrap_or_else(|_| Self::poisoned())
    }
    /// Whether `self` is the id of the calling thread.
    pub(crate) fn is_current(self) -> bool {
        REGISTRATION
            .try_with(|r| self == r.current::<C>())
            .unwrap_or(false)
    }
    /// Constructs a `Tid` from a raw id value.
    #[inline(always)]
    pub fn new(id: usize) -> Self {
        Self::from_usize(id)
    }
}
impl<C> Tid<C> {
    // Sentinel id used when the thread-local registration is inaccessible
    // (e.g. during unwinding); `usize::MAX` is never a real thread id.
    #[cold]
    fn poisoned() -> Self {
        Self {
            id: std::usize::MAX,
            _not_send: PhantomData,
            _cfg: PhantomData,
        }
    }
    /// Returns true if the local thread ID was accessed while unwinding.
    pub(crate) fn is_poisoned(&self) -> bool {
        self.id == std::usize::MAX
    }
}
impl<C> fmt::Debug for Tid<C> {
    /// Formats as `Tid(<id>)`, or `Tid(<poisoned>)` for the sentinel value.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut tuple = f.debug_tuple("Tid");
        if self.is_poisoned() {
            tuple.field(&format_args!("<poisoned>"));
        } else {
            tuple.field(&format_args!("{}", self.id));
        }
        tuple.finish()
    }
}
impl<C> PartialEq for Tid<C> {
    /// Two `Tid`s are equal exactly when their numeric ids match.
    fn eq(&self, other: &Self) -> bool {
        self.id.eq(&other.id)
    }
}
// Equality over `id` is a full equivalence relation, so `Eq` is sound.
impl<C> Eq for Tid<C> {}
impl<C: cfg::Config> Clone for Tid<C> {
    fn clone(&self) -> Self {
        // `Tid` is `Copy` (see the `Copy` impl below in the original file),
        // so a bitwise copy is an equivalent clone.
        *self
    }
}
// All fields are `Copy` (usize + zero-sized markers), so `Tid` is `Copy`.
impl<C: cfg::Config> Copy for Tid<C> {}
// === impl Registration ===
impl Registration {
    fn new() -> Self {
        Self(Cell::new(None))
    }
    /// Returns the thread's cached id, registering it lazily on first call.
    #[inline(always)]
    fn current<C: cfg::Config>(&self) -> Tid<C> {
        if let Some(tid) = self.0.get().map(Tid::new) {
            tid
        } else {
            self.register()
        }
    }
    /// Slow path: obtain an id, preferring reuse from the free list.
    #[cold]
    fn register<C: cfg::Config>(&self) -> Tid<C> {
        let id = REGISTRY
            .free
            .lock()
            .ok()
            .and_then(|mut free| {
                // Only reuse when more than one id is free, i.e. the last
                // remaining free id is deliberately left un-reused.
                // NOTE(review): rationale not visible here — confirm upstream.
                if free.len() > 1 {
                    free.pop_front()
                } else {
                    None
                }
            })
            // Otherwise mint a fresh id from the monotonic counter.
            .unwrap_or_else(|| REGISTRY.next.fetch_add(1, Ordering::AcqRel));
        debug_assert!(id <= Tid::<C>::BITS, "thread ID overflow!");
        // Cache in the thread-local so subsequent calls take the fast path.
        self.0.set(Some(id));
        Tid::new(id)
    }
}
// Reusing thread IDs doesn't work under loom, since this `Drop` impl results in
// an access to a `loom` lazy_static while the test is shutting down, which
// panics. T_T
// Just skip TID reuse and use loom's lazy_static macro to ensure we have a
// clean initial TID on every iteration, instead.
#[cfg(not(all(loom, any(feature = "loom", test))))]
impl Drop for Registration {
    fn drop(&mut self) {
        // Return this thread's id (if one was ever assigned) to the free
        // list so another thread may reuse it.
        let id = match self.0.get() {
            Some(id) => id,
            None => return,
        };
        if let Ok(mut free) = REGISTRY.free.lock() {
            free.push_back(id);
        }
    }
}
| 22.642857 | 80 | 0.517107 |
ed492d1348fc86065c42ec49fada00d7d3bb3285 | 997 | // This file was generated by gir (https://github.com/gtk-rs/gir @ 8b9d0bb)
// from gir-files (https://github.com/gtk-rs/gir-files @ 77d1f70)
// DO NOT EDIT
use Bin;
use Buildable;
use Container;
use Dialog;
use FileChooser;
use Widget;
use Window;
use ffi;
use glib::translate::*;
use glib_ffi;
use gobject_ffi;
use std::mem;
use std::ptr;
// Generated glib wrapper: declares `FileChooserDialog` over the GTK C type
// and lists its class hierarchy plus implemented interfaces.
glib_wrapper! {
    pub struct FileChooserDialog(Object<ffi::GtkFileChooserDialog, ffi::GtkFileChooserDialogClass>): Dialog, Window, Bin, Container, Widget, Buildable, FileChooser;
    match fn {
        get_type => || ffi::gtk_file_chooser_dialog_get_type(),
    }
}
impl FileChooserDialog {
    // The generator could not bind the variadic C constructor, so it is
    // left commented out; no methods are currently exposed here.
    //pub fn new<'a, 'b, 'c, P: Into<Option<&'a str>>, Q: IsA<Window> + 'b, R: Into<Option<&'b Q>>, S: Into<Option<&'c str>>>(title: P, parent: R, action: FileChooserAction, first_button_text: S, : /*Unknown conversion*//*Unimplemented*/Fundamental: VarArgs) -> FileChooserDialog {
    //    unsafe { TODO: call ffi::gtk_file_chooser_dialog_new() }
    //}
}
| 31.15625 | 281 | 0.691073 |
dd09f27413cf337f6d1b80980be3fc16616525d9 | 1,776 | //! This module contains utility functions for internal testing and benchmarking.
//! Unit tests in general will be placed in the files where the tested functions
//! are located.
//!
//! This is meant to generate common parameter objects to test or benchmark the lib.
//!
//! This module will also contain benchmarking utility functions.
use crate::crack::parameter::CrackParameter;
use crate::symbols::Builder;
use crate::transform_fns::{str_to_sha256_hash, Sha256Hash, NO_HASHING, SHA256_HASHING};
/// Creates CrackParameter for full alphabet with identity hashing.
///
/// The maximum length equals the target's length; the minimum is zero.
#[allow(dead_code)]
pub fn create_test_crack_params_full_alphabet(target: &str) -> CrackParameter<String> {
    CrackParameter::new(
        target.to_owned(),
        Builder::new().full().build(),
        target.len() as u32,
        0,
        NO_HASHING,
        false,
    )
}
/// Creates CrackParameter for full alphabet with sha256 hashing.
///
/// Hashes the target first; searches lengths 0..=6 over the full alphabet.
#[allow(dead_code)]
pub fn create_test_crack_params_full_alphabet_sha256(target: &str) -> CrackParameter<Sha256Hash> {
    CrackParameter::new(
        str_to_sha256_hash(target),
        Builder::new().full().build(),
        6,
        0,
        SHA256_HASHING,
        false,
    )
}
/// Creates CrackParameter for full alphabet with sha256 hashing and fair mode.
///
/// Same as the non-fair variant but with max length 5 and fair mode enabled.
#[allow(dead_code)]
pub fn create_test_crack_params_full_alphabet_sha256_fair(
    target: &str,
) -> CrackParameter<Sha256Hash> {
    CrackParameter::new(
        str_to_sha256_hash(target),
        Builder::new().full().build(),
        5,
        0,
        SHA256_HASHING,
        true,
    )
}
| 35.52 | 98 | 0.718468 |
7179b82b183d30ecc79a819148bd148bf9a4a5b4 | 331 | //! Low-level bindings for the [Bitwuzla] SMT solver.
//!
//! Please see the Bitwuzla [C API documentation] for function descriptions.
//!
//! [Bitwuzla]: https://bitwuzla.github.io/
//! [C API documentation]: https://bitwuzla.github.io/docs/c/api.html
#![allow(non_upper_case_globals)]
// Splice the bindgen-generated FFI declarations into this crate at compile time.
include!("../src-generated/bindings.rs");
| 30.090909 | 76 | 0.706949 |
0a4fcde55ec88e95256528ad15b95b1e6631f547 | 7,756 | // Copyright 2022 pyke.io
// 2019-2021 Tauri Programme within The Commons Conservancy
// [https://tauri.studio/]
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_imports)]
use millennium_macros::{command_enum, module_command_handler, CommandModule};
use serde::Deserialize;
use super::InvokeContext;
#[cfg(shell_scope)]
use crate::ExecuteArgs;
use crate::{api::ipc::CallbackFn, Runtime};
#[cfg(shell_scope)]
use crate::{Manager, Scopes};
#[cfg(not(shell_scope))]
type ExecuteArgs = ();
#[cfg(any(shell_execute, shell_sidecar))]
use std::sync::{Arc, Mutex};
use std::{collections::HashMap, path::PathBuf};
// Key used to address a spawned child process; produced by `child.pid()`.
type ChildId = u32;
// Shared map of live children, consulted by stdinWrite/killChild.
#[cfg(any(shell_execute, shell_sidecar))]
type ChildStore = Arc<Mutex<HashMap<ChildId, crate::api::process::CommandChild>>>;
// Returns the lazily-initialized, process-wide store of running children.
#[cfg(any(shell_execute, shell_sidecar))]
fn command_childs() -> &'static ChildStore {
	use once_cell::sync::Lazy;
	static STORE: Lazy<ChildStore> = Lazy::new(Default::default);
	&STORE
}
/// Payload accepted by the stdinWrite command: UTF-8 text or raw bytes.
/// `untagged` lets JSON strings map to `Text` and arrays to `Raw`.
#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
pub enum Buffer {
	Text(String),
	Raw(Vec<u8>)
}
/// Serde default for `CommandOptions::env`: an empty-but-present map.
/// `Some(empty)` adds no variables, whereas a `None` env makes `execute`
/// clear the child's environment entirely.
#[allow(clippy::unnecessary_wraps)]
fn default_env() -> Option<HashMap<String, String>> {
	Some(HashMap::new())
}
/// Options controlling how a command is spawned.
#[allow(dead_code)]
#[derive(Debug, Clone, Default, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CommandOptions {
	// When true, `program` refers to a bundled sidecar binary.
	#[serde(default)]
	sidecar: bool,
	// Working directory for the spawned process, if any.
	cwd: Option<PathBuf>,
	// by default we don't add any env variables to the spawned process
	// but the env is an `Option` so when it's `None` we clear the env.
	#[serde(default = "default_env")]
	env: Option<HashMap<String, String>>
}
/// The API descriptor.
#[command_enum]
#[derive(Deserialize, CommandModule)]
#[serde(tag = "cmd", rename_all = "camelCase")]
pub enum Cmd {
	/// The execute script API.
	#[cmd(shell_script, "shell > execute or shell > sidecar")]
	#[serde(rename_all = "camelCase")]
	Execute {
		program: String,
		args: ExecuteArgs,
		// JS callback invoked for each process event.
		on_event_fn: CallbackFn,
		#[serde(default)]
		options: CommandOptions
	},
	// Write to a previously spawned child's stdin.
	#[cmd(shell_script, "shell > execute or shell > sidecar")]
	StdinWrite { pid: ChildId, buffer: Buffer },
	// Terminate a previously spawned child.
	#[cmd(shell_script, "shell > execute or shell > sidecar")]
	KillChild { pid: ChildId },
	// Open a path/URL, optionally with a specific program.
	#[cmd(shell_open, "shell > open")]
	Open { path: String, with: Option<String> }
}
impl Cmd {
	// Spawn a program (or bundled sidecar), track the child, and stream its
	// events to the `on_event_fn` JS callback. Returns the child's pid.
	#[module_command_handler(shell_script)]
	#[allow(unused_variables)]
	fn execute<R: Runtime>(
		context: InvokeContext<R>,
		program: String,
		args: ExecuteArgs,
		on_event_fn: CallbackFn,
		options: CommandOptions
	) -> super::Result<ChildId> {
		// Resolve the program into a scope-checked command, either as a
		// configured sidecar or as a regular allowed executable.
		let mut command = if options.sidecar {
			#[cfg(not(shell_sidecar))]
			return Err(crate::Error::ApiNotAllowlisted("shell > sidecar".to_string()).into_anyhow());
			#[cfg(shell_sidecar)]
			{
				let program = PathBuf::from(program);
				let program_as_string = program.display().to_string();
				let program_no_ext_as_string = program.with_extension("").display().to_string();
				// Match the request against the bundle's externalBin list,
				// with or without the file extension.
				let configured_sidecar = context
					.config
					.millennium
					.bundle
					.external_bin
					.as_ref()
					.map(|bins| bins.iter().find(|b| b == &&program_as_string || b == &&program_no_ext_as_string))
					.unwrap_or_default();
				if let Some(sidecar) = configured_sidecar {
					context
						.window
						.state::<Scopes>()
						.shell
						.prepare_sidecar(&program.to_string_lossy(), sidecar, args)
						.map_err(crate::error::into_anyhow)?
				} else {
					return Err(crate::Error::SidecarNotAllowed(program).into_anyhow());
				}
			}
		} else {
			#[cfg(not(shell_execute))]
			return Err(crate::Error::ApiNotAllowlisted("shell > execute".to_string()).into_anyhow());
			#[cfg(shell_execute)]
			match context.window.state::<Scopes>().shell.prepare(&program, args) {
				Ok(cmd) => cmd,
				Err(e) => {
					// Surface scope errors on stderr only in debug builds.
					#[cfg(debug_assertions)]
					eprintln!("{}", e);
					return Err(crate::Error::ProgramNotAllowed(PathBuf::from(program)).into_anyhow());
				}
			}
		};
		#[cfg(any(shell_execute, shell_sidecar))]
		{
			if let Some(cwd) = options.cwd {
				command = command.current_dir(cwd);
			}
			// `Some(env)` extends the environment; `None` clears it entirely.
			if let Some(env) = options.env {
				command = command.envs(env);
			} else {
				command = command.env_clear();
			}
			let (mut rx, child) = command.spawn()?;
			let pid = child.pid();
			// Register the child so stdin_write/kill_child can find it by pid.
			command_childs().lock().unwrap().insert(pid, child);
			// Forward every process event to the JS callback; drop the store
			// entry once the child reports termination.
			crate::async_runtime::spawn(async move {
				while let Some(event) = rx.recv().await {
					if matches!(event, crate::api::process::CommandEvent::Terminated(_)) {
						command_childs().lock().unwrap().remove(&pid);
					}
					let js = crate::api::ipc::format_callback(on_event_fn, &event).expect("unable to serialize CommandEvent");
					let _ = context.window.eval(js.as_str());
				}
			});
			Ok(pid)
		}
	}
	// Write text or raw bytes to a tracked child's stdin; silently a no-op
	// when the pid is unknown (e.g. the child already exited).
	#[module_command_handler(shell_script)]
	fn stdin_write<R: Runtime>(_context: InvokeContext<R>, pid: ChildId, buffer: Buffer) -> super::Result<()> {
		if let Some(child) = command_childs().lock().unwrap().get_mut(&pid) {
			match buffer {
				Buffer::Text(t) => child.write(t.as_bytes())?,
				Buffer::Raw(r) => child.write(&r)?
			}
		}
		Ok(())
	}
	// Kill a tracked child and drop it from the store; no-op for unknown pids.
	#[module_command_handler(shell_script)]
	fn kill_child<R: Runtime>(_context: InvokeContext<R>, pid: ChildId) -> super::Result<()> {
		if let Some(child) = command_childs().lock().unwrap().remove(&pid) {
			child.kill()?;
		}
		Ok(())
	}
	/// Open a (url) path with a default or specific browser opening program.
	///
	/// See [`crate::api::shell::open`] for how it handles security-related
	/// measures.
	#[module_command_handler(shell_open)]
	fn open<R: Runtime>(context: InvokeContext<R>, path: String, with: Option<String>) -> super::Result<()> {
		use std::str::FromStr;
		with.as_deref()
			// only allow pre-determined programs to be specified
			.map(crate::api::shell::Program::from_str)
			.transpose()
			.map_err(Into::into)
			// validate and open path
			.and_then(|with| crate::api::shell::open(&context.window.state::<Scopes>().shell, path, with).map_err(Into::into))
	}
}
#[cfg(test)]
mod tests {
	use quickcheck::{Arbitrary, Gen};
	use super::{Buffer, ChildId, CommandOptions, ExecuteArgs};
	use crate::api::ipc::CallbackFn;
	// Random CommandOptions; sidecar stays false so tests exercise plain execute.
	impl Arbitrary for CommandOptions {
		fn arbitrary(g: &mut Gen) -> Self {
			Self {
				sidecar: false,
				cwd: Option::arbitrary(g),
				env: Option::arbitrary(g)
			}
		}
	}
	// Only the Text variant is generated; Raw is not fuzzed here.
	impl Arbitrary for Buffer {
		fn arbitrary(g: &mut Gen) -> Self {
			Buffer::Text(String::arbitrary(g))
		}
	}
	#[cfg(shell_scope)]
	impl Arbitrary for ExecuteArgs {
		fn arbitrary(_: &mut Gen) -> Self {
			ExecuteArgs::None
		}
	}
	// The macros below generate allowlist-enforcement tests for each command.
	#[millennium_macros::module_command_test(shell_execute, "shell > execute")]
	#[quickcheck_macros::quickcheck]
	fn execute(_program: String, _args: ExecuteArgs, _on_event_fn: CallbackFn, _options: CommandOptions) {}
	#[millennium_macros::module_command_test(shell_execute, "shell > execute or shell > sidecar")]
	#[quickcheck_macros::quickcheck]
	fn stdin_write(_pid: ChildId, _buffer: Buffer) {}
	#[millennium_macros::module_command_test(shell_execute, "shell > execute or shell > sidecar")]
	#[quickcheck_macros::quickcheck]
	fn kill_child(_pid: ChildId) {}
	#[millennium_macros::module_command_test(shell_open, "shell > open")]
	#[quickcheck_macros::quickcheck]
	fn open(_path: String, _with: Option<String>) {}
}
| 30.178988 | 117 | 0.679216 |
696ebb8cb662c8d635c46d573e8550f130e56ec8 | 639 | extern crate tetsy_wasm;
extern crate twasm_utils as utils;
use twasm_utils::logger;
use std::env;
fn main() {
	logger::init();
	let args: Vec<String> = env::args().collect();
	if args.len() != 3 {
		println!("Usage: {} input_file.wasm output_file.wasm", args[0]);
		return;
	}
	// Load the module, instrument it with a gas counter imported from the
	// "env" module, then write the result back out.
	let module = tetsy_wasm::deserialize_file(&args[1]).expect("Module deserialization to succeed");
	let result = utils::inject_gas_counter(module, &utils::rules::Set::default(), "env")
		.expect("Failed to inject gas. Some forbidden opcodes?");
	tetsy_wasm::serialize_to_file(&args[2], result).expect("Module serialization to succeed")
}
| 25.56 | 97 | 0.699531 |
e201420908e16470ab67efeab8617e80efd6ae98 | 794 | /*
* Copyright © 2020 Peter M. Stahl [email protected]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either expressed or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use include_dir::{include_dir, Dir};
// Catalan language-model files, embedded into the binary at compile time.
pub const CATALAN_MODELS_DIRECTORY: Dir = include_dir!("models");
// Catalan test fixtures, likewise embedded.
pub const CATALAN_TESTDATA_DIRECTORY: Dir = include_dir!("testdata");
| 36.090909 | 77 | 0.74937 |
fc504458f8cd9ca5a49d1a25999135146617da2f | 27,149 | // Copyright 2020 Ant Group. All rights reserved.
// Copyright (C) 2020 Alibaba Cloud. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0
//! A manager to cache all file system bootstrap into memory.
//!
//! All file system bootstrap will be loaded, validated and cached into memory when loading the
//! file system. And currently the cache layer only supports readonly file systems.
use std::collections::{BTreeMap, HashMap};
use std::ffi::{OsStr, OsString};
use std::io::SeekFrom;
use std::io::{ErrorKind, Read, Result};
use std::mem::size_of;
use std::os::unix::ffi::OsStrExt;
use std::sync::Arc;
use fuse_backend_rs::abi::linux_abi;
use fuse_backend_rs::api::filesystem::Entry;
use crate::metadata::layout::v5::{
rafsv5_alloc_bio_desc, rafsv5_validate_digest, RafsBlobEntry, RafsChunkFlags, RafsChunkInfo,
RafsV5BlobTable, RafsV5ChunkInfo, RafsV5Inode, RafsV5InodeFlags, RafsV5InodeOps,
RafsV5XAttrsTable, RAFSV5_ALIGNMENT,
};
use crate::metadata::layout::{bytes_to_os_str, parse_xattr, RAFS_ROOT_INODE};
use crate::metadata::{
Inode, RafsBioDesc, RafsError, RafsInode, RafsResult, RafsSuperBlobs, RafsSuperBlock,
RafsSuperInodes, RafsSuperMeta, XattrName, XattrValue, RAFS_INODE_BLOCKSIZE,
};
use crate::RafsIoReader;
use nydus_utils::digest::Algorithm;
use nydus_utils::{digest::RafsDigest, ByteSize};
// In-memory cache of a whole RAFS v5 filesystem bootstrap (readonly).
pub struct CachedSuperBlockV5 {
    // Table of data blobs referenced by the filesystem.
    s_blob: Arc<RafsV5BlobTable>,
    // Superblock metadata parsed from the bootstrap.
    s_meta: Arc<RafsSuperMeta>,
    // Every inode, fully loaded and keyed by inode number.
    s_inodes: BTreeMap<Inode, Arc<CachedInodeV5>>,
    // Whether to validate the inode digest tree after loading.
    digest_validate: bool,
}
impl CachedSuperBlockV5 {
    /// Creates an empty cached superblock for the given metadata.
    pub fn new(meta: RafsSuperMeta, digest_validate: bool) -> Self {
        CachedSuperBlockV5 {
            s_blob: Arc::new(RafsV5BlobTable::new()),
            s_inodes: BTreeMap::new(),
            s_meta: Arc::new(meta),
            digest_validate,
        }
    }
    /// v5 layout is based on BFS, which means parents always are in front of children
    fn load_all_inodes(&mut self, r: &mut RafsIoReader) -> Result<()> {
        // Directory inodes whose parent linkage is deferred (see below).
        let mut dir_ino_set = Vec::new();
        let mut entries = 0;
        loop {
            // Stopping after loading all inodes helps to append possible
            // new structure to the tail of bootstrap in the future.
            if entries >= self.s_meta.inode_table_entries {
                break;
            }
            let mut inode = CachedInodeV5::new(self.s_blob.clone(), self.s_meta.clone());
            match inode.load(&self.s_meta, r) {
                Ok(_) => {
                    entries += 1;
                    trace!(
                        "got inode ino {} parent {} size {} child_idx {} child_cnt {}",
                        inode.ino(),
                        inode.parent(),
                        inode.size(),
                        inode.i_child_idx,
                        inode.i_child_cnt,
                    );
                }
                // A clean EOF before the declared entry count also ends the scan.
                Err(ref e) if e.kind() == ErrorKind::UnexpectedEof => break,
                Err(e) => {
                    error!("error when loading CachedInode {:?}", e);
                    return Err(e);
                }
            }
            let child_inode = self.hash_inode(Arc::new(inode))?;
            if child_inode.is_dir() {
                // Delay associating dir inode to its parent because that will take
                // a cloned inode object, which preventing us from using `Arc::get_mut`.
                // Without `Arc::get_mut` during Cached meta setup(loading all inodes),
                // we have to lock inode everywhere for mutability. It really hurts.
                dir_ino_set.push(child_inode.i_ino);
                continue;
            }
            self.add_into_parent(child_inode);
        }
        // Now attach the deferred directories to their parents.
        while !dir_ino_set.is_empty() {
            let ino = dir_ino_set.pop().unwrap();
            self.add_into_parent(self.get_node(ino)?);
        }
        debug!("all {} inodes loaded", self.s_inodes.len());
        Ok(())
    }
    // Looks up a cached inode by number, cloning the Arc handle.
    fn get_node(&self, ino: Inode) -> Result<Arc<CachedInodeV5>> {
        Ok(self.s_inodes.get(&ino).ok_or_else(|| enoent!())?.clone())
    }
    // Mutable lookup, used while wiring children into their parents.
    fn get_node_mut(&mut self, ino: Inode) -> Result<&mut Arc<CachedInodeV5>> {
        self.s_inodes.get_mut(&ino).ok_or_else(|| enoent!())
    }
    // Inserts an inode into the map. For hardlinks, an already-cached entry
    // that has chunk data wins and the new copy is discarded.
    fn hash_inode(&mut self, inode: Arc<CachedInodeV5>) -> Result<Arc<CachedInodeV5>> {
        if inode.is_hardlink() {
            if let Some(i) = self.s_inodes.get(&inode.i_ino) {
                if !i.i_data.is_empty() {
                    return Ok(inode);
                }
            }
        }
        let ino = inode.ino();
        self.s_inodes.insert(inode.i_ino, inode);
        self.get_node(ino)
    }
    // Registers `child_inode` in its parent's child list, if the parent exists.
    fn add_into_parent(&mut self, child_inode: Arc<CachedInodeV5>) {
        if let Ok(parent_inode) = self.get_node_mut(child_inode.parent()) {
            // Safe to unwrap: during setup only this map holds the parent Arc.
            Arc::get_mut(parent_inode)
                .unwrap()
                .add_child(child_inode.clone());
        }
    }
}
impl RafsSuperInodes for CachedSuperBlockV5 {
    // Reports the inode count as the maximum inode number.
    // NOTE(review): assumes inode numbers are dense — confirm against the
    // v5 layout documentation.
    fn get_max_ino(&self) -> u64 {
        self.s_inodes.len() as u64
    }
    // Digest validation was already done at load time, so the flag is ignored.
    fn get_inode(&self, ino: Inode, _digest_validate: bool) -> Result<Arc<dyn RafsInode>> {
        self.s_inodes
            .get(&ino)
            .map_or(Err(enoent!()), |i| Ok(i.clone()))
    }
    // Delegates digest-tree validation to the shared v5 helper.
    fn validate_digest(
        &self,
        inode: Arc<dyn RafsInode>,
        recursive: bool,
        digester: Algorithm,
    ) -> Result<bool> {
        rafsv5_validate_digest(inode, recursive, digester)
    }
}
impl RafsSuperBlobs for CachedSuperBlockV5 {
    /// Returns a shared handle to the filesystem's blob table.
    fn get_blob_table(&self) -> Arc<RafsV5BlobTable> {
        Arc::clone(&self.s_blob)
    }
}
impl RafsSuperBlock for CachedSuperBlockV5 {
    /// Loads blob table and all inodes from the bootstrap, then optionally
    /// validates the inode digest tree rooted at the filesystem root.
    fn load(&mut self, r: &mut RafsIoReader) -> Result<()> {
        // FIXME: add validator for all load operations.
        // Now the seek offset points to inode table, so we can easily
        // find first inode offset.
        r.seek(SeekFrom::Start(self.s_meta.inode_table_offset))?;
        let mut offset = [0u8; size_of::<u32>()];
        r.read_exact(&mut offset)?;
        // The offset is aligned with 8 bytes to make it easier to
        // validate RafsV5Inode.
        let inode_offset = u32::from_le_bytes(offset) << 3;
        // Load blob table.
        r.seek(SeekFrom::Start(self.s_meta.blob_table_offset))?;
        let mut blob_table = RafsV5BlobTable::new();
        let meta = &self.s_meta;
        // Load extended blob table if the bootstrap including
        // extended blob table.
        if meta.extended_blob_table_offset > 0 {
            r.seek(SeekFrom::Start(meta.extended_blob_table_offset))?;
            blob_table
                .extended
                .load(r, meta.extended_blob_table_entries as usize)?;
        }
        r.seek(SeekFrom::Start(meta.blob_table_offset))?;
        blob_table.load(r, meta.blob_table_size)?;
        self.s_blob = Arc::new(blob_table);
        // Load all inodes started from first inode offset.
        r.seek(SeekFrom::Start(inode_offset as u64))?;
        self.load_all_inodes(r)?;
        // Validate inode digest tree
        let digester = self.s_meta.get_digester();
        if self.digest_validate
            && !self.validate_digest(self.get_inode(RAFS_ROOT_INODE, false)?, true, digester)?
        {
            return Err(einval!("invalid inode digest"));
        }
        Ok(())
    }
    // The cached layer is readonly: in-place updates are not supported.
    fn update(&self, _r: &mut RafsIoReader) -> RafsResult<()> {
        Err(RafsError::Unsupported)
    }
    // Drops every cached inode.
    fn destroy(&mut self) {
        self.s_inodes.clear();
    }
}
// Fully-materialized copy of one on-disk RAFS v5 inode.
#[derive(Default, Clone, Debug)]
pub struct CachedInodeV5 {
    i_ino: Inode,
    i_name: OsString,
    // Digest of the inode, used for metadata validation.
    i_digest: RafsDigest,
    i_parent: u64,
    i_mode: u32,
    i_projid: u32,
    i_uid: u32,
    i_gid: u32,
    i_flags: RafsV5InodeFlags,
    i_size: u64,
    i_blocks: u64,
    i_nlink: u32,
    // Index of the first child and number of children (chunks for regular
    // files, directory entries for directories).
    i_child_idx: u32,
    i_child_cnt: u32,
    // extra info need cache
    i_blksize: u32,
    i_rdev: u32,
    i_mtime_nsec: u32,
    i_mtime: u64,
    i_target: OsString, // for symbol link
    i_xattr: HashMap<OsString, Vec<u8>>,
    // Chunk infos for regular files.
    i_data: Vec<Arc<CachedChunkInfoV5>>,
    // Child inodes for directories, sorted by name once complete.
    i_child: Vec<Arc<CachedInodeV5>>,
    i_blob_table: Arc<RafsV5BlobTable>,
    i_meta: Arc<RafsSuperMeta>,
}
impl CachedInodeV5 {
    /// Creates an empty cached inode tied to the given blob table and metadata.
    pub fn new(blob_table: Arc<RafsV5BlobTable>, meta: Arc<RafsSuperMeta>) -> Self {
        CachedInodeV5 {
            i_blob_table: blob_table,
            i_meta: meta,
            ..Default::default()
        }
    }
    // Reads the name bytes, then skips the alignment padding after them.
    fn load_name(&mut self, name_size: usize, r: &mut RafsIoReader) -> Result<()> {
        if name_size > 0 {
            let mut name_buf = vec![0u8; name_size];
            r.read_exact(name_buf.as_mut_slice())?;
            self.i_name = bytes_to_os_str(&name_buf).to_os_string();
        }
        r.seek_to_next_aligned(name_size, RAFSV5_ALIGNMENT)?;
        Ok(())
    }
    // Reads the symlink target (symlinks only), then skips alignment padding.
    fn load_symlink(&mut self, symlink_size: usize, r: &mut RafsIoReader) -> Result<()> {
        if self.is_symlink() && symlink_size > 0 {
            let mut symbol_buf = vec![0u8; symlink_size];
            r.read_exact(symbol_buf.as_mut_slice())?;
            self.i_target = bytes_to_os_str(&symbol_buf).to_os_string();
        }
        r.seek_to_next_aligned(symlink_size, RAFSV5_ALIGNMENT)?;
        Ok(())
    }
    // Reads and parses the xattr table when the inode carries one.
    fn load_xattr(&mut self, r: &mut RafsIoReader) -> Result<()> {
        if self.has_xattr() {
            let mut xattrs = RafsV5XAttrsTable::new();
            r.read_exact(xattrs.as_mut())?;
            let mut xattr_buf = vec![0u8; xattrs.aligned_size()];
            r.read_exact(xattr_buf.as_mut_slice())?;
            parse_xattr(&xattr_buf, xattrs.size(), |name, value| {
                self.i_xattr.insert(name.to_os_string(), value);
                // Returning true keeps the parse loop going.
                true
            })?;
        }
        Ok(())
    }
    // For regular files, reads `i_child_cnt` chunk records into `i_data`.
    fn load_chunk_info(&mut self, r: &mut RafsIoReader) -> Result<()> {
        if self.is_reg() && self.i_child_cnt > 0 {
            let mut chunk = RafsV5ChunkInfo::new();
            for _i in 0..self.i_child_cnt {
                chunk.load(r)?;
                self.i_data.push(Arc::new(CachedChunkInfoV5::from(&chunk)));
            }
        }
        Ok(())
    }
    /// Loads one complete inode record from `r` in on-disk order.
    pub fn load(&mut self, sb: &RafsSuperMeta, r: &mut RafsIoReader) -> Result<()> {
        // RafsV5Inode...name...symbol link...chunks
        let mut inode = RafsV5Inode::new();
        // parse ondisk inode
        // RafsV5Inode|name|symbol|xattr|chunks
        r.read_exact(inode.as_mut())?;
        self.copy_from_ondisk(&inode);
        self.load_name(inode.i_name_size as usize, r)?;
        self.load_symlink(inode.i_symlink_size as usize, r)?;
        self.load_xattr(r)?;
        self.load_chunk_info(r)?;
        self.i_blksize = sb.block_size;
        self.validate()?;
        Ok(())
    }
    // Copies the fixed-size fields from the on-disk record.
    fn copy_from_ondisk(&mut self, inode: &RafsV5Inode) {
        self.i_ino = inode.i_ino;
        self.i_digest = inode.i_digest;
        self.i_parent = inode.i_parent;
        self.i_mode = inode.i_mode;
        self.i_projid = inode.i_projid;
        self.i_uid = inode.i_uid;
        self.i_gid = inode.i_gid;
        self.i_flags = inode.i_flags;
        self.i_size = inode.i_size;
        self.i_nlink = inode.i_nlink;
        self.i_blocks = inode.i_blocks;
        self.i_child_idx = inode.i_child_index;
        self.i_child_cnt = inode.i_child_count;
        self.i_rdev = inode.i_rdev;
        self.i_mtime = inode.i_mtime;
        self.i_mtime_nsec = inode.i_mtime_nsec;
    }
    // Appends a child; once the last expected child arrives, sorts by name so
    // `get_child_by_name` can binary-search.
    fn add_child(&mut self, child: Arc<CachedInodeV5>) {
        self.i_child.push(child);
        if self.i_child.len() == (self.i_child_cnt as usize) {
            // all children are ready, do sort
            self.i_child.sort_by(|c1, c2| c1.i_name.cmp(&c2.i_name));
        }
    }
}
impl RafsInode for CachedInodeV5 {
    // Sanity check after load; currently only rejects empty symlink targets.
    fn validate(&self) -> Result<()> {
        // TODO: validate
        if self.is_symlink() && self.i_target.is_empty() {
            return Err(einval!("invalid inode"));
        }
        Ok(())
    }
    // Builds a FUSE entry with attr/entry timeouts from the superblock meta.
    #[inline]
    fn get_entry(&self) -> Entry {
        Entry {
            attr: self.get_attr().into(),
            inode: self.i_ino,
            generation: 0,
            attr_flags: 0,
            attr_timeout: self.i_meta.attr_timeout,
            entry_timeout: self.i_meta.entry_timeout,
        }
    }
    // FUSE attributes derived from the cached fields.
    #[inline]
    fn get_attr(&self) -> linux_abi::Attr {
        linux_abi::Attr {
            ino: self.i_ino,
            size: self.i_size,
            blocks: self.i_blocks,
            mode: self.i_mode,
            nlink: self.i_nlink as u32,
            blksize: RAFS_INODE_BLOCKSIZE,
            rdev: self.i_rdev,
            ..Default::default()
        }
    }
    fn get_symlink(&self) -> Result<OsString> {
        if !self.is_symlink() {
            Err(einval!("inode is not a symlink"))
        } else {
            Ok(self.i_target.clone())
        }
    }
    // Binary search is valid because `add_child` sorts children by name.
    fn get_child_by_name(&self, name: &OsStr) -> Result<Arc<dyn RafsInode>> {
        let idx = self
            .i_child
            .binary_search_by(|c| c.i_name.as_os_str().cmp(name))
            .map_err(|_| enoent!())?;
        Ok(self.i_child[idx].clone())
    }
    // Index into this directory's child list (panics on out-of-range index).
    #[inline]
    fn get_child_by_index(&self, index: Inode) -> Result<Arc<dyn RafsInode>> {
        Ok(self.i_child[index as usize].clone())
    }
    fn get_child_index(&self) -> Result<u32> {
        Ok(self.i_child_idx)
    }
    #[inline]
    fn get_child_count(&self) -> u32 {
        self.i_child_cnt
    }
    // Chunk info for a regular file (panics on out-of-range index).
    #[inline]
    fn get_chunk_info(&self, idx: u32) -> Result<Arc<dyn RafsChunkInfo>> {
        Ok(self.i_data[idx as usize].clone())
    }
    fn has_xattr(&self) -> bool {
        self.i_flags.contains(RafsV5InodeFlags::XATTR)
    }
    #[inline]
    fn get_xattr(&self, name: &OsStr) -> Result<Option<XattrValue>> {
        Ok(self.i_xattr.get(name).cloned())
    }
    fn get_xattrs(&self) -> Result<Vec<XattrName>> {
        Ok(self
            .i_xattr
            .keys()
            .map(|k| k.as_bytes().to_vec())
            .collect::<Vec<XattrName>>())
    }
    fn is_dir(&self) -> bool {
        self.i_mode & libc::S_IFMT == libc::S_IFDIR
    }
    fn is_symlink(&self) -> bool {
        self.i_mode & libc::S_IFMT == libc::S_IFLNK
    }
    fn is_reg(&self) -> bool {
        self.i_mode & libc::S_IFMT == libc::S_IFREG
    }
    // A hardlink here is any non-directory with more than one link.
    fn is_hardlink(&self) -> bool {
        !self.is_dir() && self.i_nlink > 1
    }
    fn name(&self) -> OsString {
        self.i_name.clone()
    }
    fn flags(&self) -> u64 {
        self.i_flags.bits()
    }
    fn get_digest(&self) -> RafsDigest {
        self.i_digest
    }
    // Depth-first walk pushing every non-empty regular file (and other
    // non-dir entries) into `descendants`; empty files are skipped.
    fn collect_descendants_inodes(
        &self,
        descendants: &mut Vec<Arc<dyn RafsInode>>,
    ) -> Result<usize> {
        if !self.is_dir() {
            return Err(enotdir!());
        }
        let mut child_dirs: Vec<Arc<dyn RafsInode>> = Vec::new();
        for child_inode in &self.i_child {
            if child_inode.is_dir() {
                trace!("Got dir {:?}", child_inode.name());
                child_dirs.push(child_inode.clone());
            } else {
                if child_inode.is_empty_size() {
                    continue;
                }
                descendants.push(child_inode.clone());
            }
        }
        for d in child_dirs {
            d.collect_descendants_inodes(descendants)?;
        }
        Ok(0)
    }
    // Delegates block-IO descriptor construction to the shared v5 helper.
    fn alloc_bio_desc(&self, offset: u64, size: usize, user_io: bool) -> Result<RafsBioDesc> {
        rafsv5_alloc_bio_desc(self, offset, size, user_io)
    }
    fn get_name_size(&self) -> u16 {
        self.i_name.byte_size() as u16
    }
    // Symlink target size in bytes; zero for non-symlinks.
    fn get_symlink_size(&self) -> u16 {
        if self.is_symlink() {
            self.i_target.byte_size() as u16
        } else {
            0
        }
    }
    impl_getter!(ino, i_ino, u64);
    impl_getter!(parent, i_parent, u64);
    impl_getter!(size, i_size, u64);
    impl_getter!(rdev, i_rdev, u32);
    impl_getter!(projid, i_projid, u32);
}
impl RafsV5InodeOps for CachedInodeV5 {
    fn get_blob_by_index(&self, idx: u32) -> Result<Arc<RafsBlobEntry>> {
        self.i_blob_table.get(idx)
    }
    // Block size copied from the superblock at load time.
    fn get_blocksize(&self) -> u32 {
        self.i_blksize
    }
    fn has_hole(&self) -> bool {
        self.i_flags.contains(RafsV5InodeFlags::HAS_HOLE)
    }
    // Reconstructs the on-disk inode record from the cached fields;
    // the inverse of `copy_from_ondisk` plus the name/symlink sizes.
    fn cast_ondisk(&self) -> Result<RafsV5Inode> {
        let i_symlink_size = if self.is_symlink() {
            self.get_symlink()?.byte_size() as u16
        } else {
            0
        };
        Ok(RafsV5Inode {
            i_digest: self.i_digest,
            i_parent: self.i_parent,
            i_ino: self.i_ino,
            i_projid: self.i_projid,
            i_uid: self.i_uid,
            i_gid: self.i_gid,
            i_mode: self.i_mode,
            i_size: self.i_size,
            i_nlink: self.i_nlink,
            i_blocks: self.i_blocks,
            i_flags: self.i_flags,
            i_child_index: self.i_child_idx,
            i_child_count: self.i_child_cnt,
            // NOTE(review): uses `len()` here while `get_name_size` uses
            // `byte_size()` — presumably equivalent for OsString; confirm.
            i_name_size: self.i_name.len() as u16,
            i_symlink_size,
            i_rdev: self.i_rdev,
            i_mtime: self.i_mtime,
            i_mtime_nsec: self.i_mtime_nsec,
            i_reserved: [0; 8],
        })
    }
}
/// Cached information about an Rafs Data Chunk.
#[derive(Clone, Default, Debug)]
pub struct CachedChunkInfoV5 {
    // Digest (hash) identifying the chunk's data.
    c_block_id: Arc<RafsDigest>,
    // Index of the blob containing this chunk.
    c_blob_index: u32,
    // Index of this chunk within that blob.
    c_index: u32,
    // Position of the chunk within the regular file it belongs to.
    c_file_offset: u64,
    // Offset of the (compressed) chunk data within the blob.
    c_compress_offset: u64,
    // Offset of the chunk in the blob's decompressed address space.
    c_decompress_offset: u64,
    // Size of the chunk data, compressed.
    c_compr_size: u32,
    // Size of the chunk data after decompression.
    c_decompress_size: u32,
    // Per-chunk flags (compressed, hole, ...).
    c_flags: RafsChunkFlags,
}
impl CachedChunkInfoV5 {
    /// Creates a `CachedChunkInfoV5` with every field at its default value.
    pub fn new() -> Self {
        // Equivalent to (and simpler than) building the struct with a
        // `..Default::default()` functional-update expression.
        Self::default()
    }

    /// Reads one on-disk `RafsV5ChunkInfo` record from `r` and caches it
    /// into `self`.
    ///
    /// Fails if the reader cannot supply a full record.
    pub fn load(&mut self, r: &mut RafsIoReader) -> Result<()> {
        let mut chunk = RafsV5ChunkInfo::new();
        r.read_exact(chunk.as_mut())?;
        self.copy_from_ondisk(&chunk);
        Ok(())
    }

    /// Copies every field of the on-disk chunk record into this cache entry.
    fn copy_from_ondisk(&mut self, chunk: &RafsV5ChunkInfo) {
        self.c_block_id = Arc::new(chunk.block_id);
        self.c_blob_index = chunk.blob_index;
        self.c_index = chunk.index;
        self.c_compress_offset = chunk.compress_offset;
        self.c_decompress_offset = chunk.decompress_offset;
        self.c_decompress_size = chunk.decompress_size;
        self.c_file_offset = chunk.file_offset;
        self.c_compr_size = chunk.compress_size;
        self.c_flags = chunk.flags;
    }
}
impl RafsChunkInfo for CachedChunkInfoV5 {
    /// Digest identifying this chunk's data.
    fn block_id(&self) -> &RafsDigest {
        &self.c_block_id
    }
    /// Whether the chunk is stored compressed in the blob.
    fn is_compressed(&self) -> bool {
        self.c_flags.contains(RafsChunkFlags::COMPRESSED)
    }
    /// Whether the chunk is a hole (no backing data).
    fn is_hole(&self) -> bool {
        self.c_flags.contains(RafsChunkFlags::HOLECHUNK)
    }
    // Plain field accessors generated by `impl_getter!`.
    impl_getter!(blob_index, c_blob_index, u32);
    impl_getter!(index, c_index, u32);
    impl_getter!(compress_offset, c_compress_offset, u64);
    impl_getter!(compress_size, c_compr_size, u32);
    impl_getter!(decompress_offset, c_decompress_offset, u64);
    impl_getter!(decompress_size, c_decompress_size, u32);
    impl_getter!(file_offset, c_file_offset, u64);
    impl_getter!(flags, c_flags, RafsChunkFlags);
}
impl From<&RafsV5ChunkInfo> for CachedChunkInfoV5 {
fn from(info: &RafsV5ChunkInfo) -> Self {
let mut chunk = CachedChunkInfoV5::new();
chunk.copy_from_ondisk(info);
chunk
}
}
#[cfg(test)]
mod cached_tests {
    // Round-trip tests: serialize RAFS v5 inodes/chunks/xattrs to a scratch
    // file, reload them through the cached metadata layer, and verify the
    // recovered fields plus bio-descriptor allocation.
    //
    // NOTE(review): fixtures use fixed paths /tmp/buf_1../tmp/buf_3, so two
    // concurrent runs of the same test binary from different checkouts could
    // collide — consider unique temp paths.
    use std::cmp;
    use std::ffi::{OsStr, OsString};
    use std::fs::OpenOptions;
    use std::io::Seek;
    use std::io::SeekFrom::Start;
    use std::os::unix::ffi::OsStrExt;
    use std::sync::Arc;
    use nydus_utils::ByteSize;
    use crate::metadata::cached_v5::CachedInodeV5;
    use crate::metadata::layout::v5::{
        rafsv5_align, RafsV5BlobTable, RafsV5ChunkInfo, RafsV5Inode, RafsV5InodeWrapper,
        RafsV5XAttrs,
    };
    use crate::metadata::{RafsInode, RafsStore, RafsSuperMeta};
    use crate::{RafsIoReader, RafsIoWriter};
    // Store a regular-file inode (1 chunk + 2 xattrs) and reload it.
    #[test]
    fn test_load_inode() {
        let mut f = OpenOptions::new()
            .truncate(true)
            .create(true)
            .write(true)
            .read(true)
            .open("/tmp/buf_1")
            .unwrap();
        // Independent handles so the writer and reader keep separate cursors.
        let mut writer = Box::new(f.try_clone().unwrap()) as RafsIoWriter;
        let mut reader = Box::new(f.try_clone().unwrap()) as RafsIoReader;
        let mut ondisk_inode = RafsV5Inode::new();
        let file_name = OsString::from("c_inode_1");
        let mut xattr = RafsV5XAttrs::default();
        xattr.add(OsString::from("k1"), vec![1u8, 2u8, 3u8, 4u8]);
        xattr.add(OsString::from("k2"), vec![10u8, 11u8, 12u8]);
        ondisk_inode.i_name_size = file_name.byte_size() as u16;
        ondisk_inode.i_child_count = 1;
        ondisk_inode.i_ino = 3;
        ondisk_inode.i_size = 8192;
        ondisk_inode.i_mode = libc::S_IFREG;
        let mut chunk = RafsV5ChunkInfo::new();
        chunk.decompress_size = 8192;
        chunk.decompress_offset = 0;
        chunk.compress_offset = 0;
        chunk.compress_size = 4096;
        let inode = RafsV5InodeWrapper {
            name: file_name.as_os_str(),
            symlink: None,
            inode: &ondisk_inode,
        };
        inode.store(&mut writer).unwrap();
        chunk.store(&mut writer).unwrap();
        xattr.store(&mut writer).unwrap();
        f.seek(Start(0)).unwrap();
        let meta = Arc::new(RafsSuperMeta::default());
        let blob_table = Arc::new(RafsV5BlobTable::new());
        let mut cached_inode = CachedInodeV5::new(blob_table, meta.clone());
        cached_inode.load(&meta, &mut reader).unwrap();
        // check data
        assert_eq!(cached_inode.i_name, file_name.to_str().unwrap());
        assert_eq!(cached_inode.i_child_cnt, 1);
        let attr = cached_inode.get_attr();
        assert_eq!(attr.ino, 3);
        assert_eq!(attr.size, 8192);
        let cached_chunk = cached_inode.get_chunk_info(0).unwrap();
        assert_eq!(cached_chunk.compress_size(), 4096);
        assert_eq!(cached_chunk.decompress_size(), 8192);
        assert_eq!(cached_chunk.compress_offset(), 0);
        assert_eq!(cached_chunk.decompress_offset(), 0);
        // Every stored xattr must round-trip byte-for-byte.
        let c_xattr = cached_inode.get_xattrs().unwrap();
        for k in c_xattr.iter() {
            let k = OsStr::from_bytes(&k);
            let v = cached_inode.get_xattr(k).unwrap();
            assert_eq!(xattr.get(k).cloned().unwrap(), v.unwrap());
        }
        // close file
        drop(f);
        std::fs::remove_file("/tmp/buf_1").unwrap();
    }
    // Store a symlink inode and verify name and target are reloaded.
    #[test]
    fn test_load_symlink() {
        let mut f = OpenOptions::new()
            .truncate(true)
            .create(true)
            .write(true)
            .read(true)
            .open("/tmp/buf_2")
            .unwrap();
        let mut writer = Box::new(f.try_clone().unwrap()) as RafsIoWriter;
        let mut reader = Box::new(f.try_clone().unwrap()) as RafsIoReader;
        let file_name = OsString::from("c_inode_2");
        let symlink_name = OsString::from("c_inode_1");
        let mut ondisk_inode = RafsV5Inode::new();
        ondisk_inode.i_name_size = file_name.byte_size() as u16;
        ondisk_inode.i_symlink_size = symlink_name.byte_size() as u16;
        ondisk_inode.i_mode = libc::S_IFLNK;
        let inode = RafsV5InodeWrapper {
            name: file_name.as_os_str(),
            symlink: Some(symlink_name.as_os_str()),
            inode: &ondisk_inode,
        };
        inode.store(&mut writer).unwrap();
        f.seek(Start(0)).unwrap();
        let meta = Arc::new(RafsSuperMeta::default());
        let blob_table = Arc::new(RafsV5BlobTable::new());
        let mut cached_inode = CachedInodeV5::new(blob_table, meta.clone());
        cached_inode.load(&meta, &mut reader).unwrap();
        assert_eq!(cached_inode.i_name, "c_inode_2");
        assert_eq!(cached_inode.get_symlink().unwrap(), symlink_name);
        drop(f);
        std::fs::remove_file("/tmp/buf_2").unwrap();
    }
    // A 3 MiB + 8 KiB file split into four 1 MiB-aligned chunks; check that
    // reads spanning chunk boundaries produce the expected bio vectors.
    #[test]
    fn test_alloc_bio_desc() {
        let mut f = OpenOptions::new()
            .truncate(true)
            .create(true)
            .write(true)
            .read(true)
            .open("/tmp/buf_3")
            .unwrap();
        let mut writer = Box::new(f.try_clone().unwrap()) as RafsIoWriter;
        let mut reader = Box::new(f.try_clone().unwrap()) as RafsIoReader;
        let file_name = OsString::from("c_inode_3");
        let mut ondisk_inode = RafsV5Inode::new();
        ondisk_inode.i_name_size = rafsv5_align(file_name.len()) as u16;
        ondisk_inode.i_child_count = 4;
        ondisk_inode.i_mode = libc::S_IFREG;
        ondisk_inode.i_size = 1024 * 1024 * 3 + 8192;
        let inode = RafsV5InodeWrapper {
            name: file_name.as_os_str(),
            symlink: None,
            inode: &ondisk_inode,
        };
        inode.store(&mut writer).unwrap();
        let mut size = ondisk_inode.i_size;
        for i in 0..ondisk_inode.i_child_count {
            let mut chunk = RafsV5ChunkInfo::new();
            // Last chunk is the 8 KiB remainder; compression halves sizes.
            chunk.decompress_size = cmp::min(1024 * 1024, size as u32);
            chunk.decompress_offset = (i * 1024 * 1024) as u64;
            chunk.compress_size = chunk.decompress_size / 2;
            chunk.compress_offset = ((i * 1024 * 1024) / 2) as u64;
            chunk.file_offset = chunk.decompress_offset;
            chunk.store(&mut writer).unwrap();
            size -= chunk.decompress_size as u64;
        }
        f.seek(Start(0)).unwrap();
        let mut meta = Arc::new(RafsSuperMeta::default());
        Arc::get_mut(&mut meta).unwrap().block_size = 1024 * 1024;
        let mut blob_table = Arc::new(RafsV5BlobTable::new());
        Arc::get_mut(&mut blob_table)
            .unwrap()
            .add(String::from("123333"), 0, 0, 0, 0, 0);
        let mut cached_inode = CachedInodeV5::new(blob_table, meta.clone());
        cached_inode.load(&meta, &mut reader).unwrap();
        // Read fully inside the first chunk.
        let desc1 = cached_inode.alloc_bio_desc(0, 100, true).unwrap();
        assert_eq!(desc1.bi_size, 100);
        assert_eq!(desc1.bi_vec.len(), 1);
        assert_eq!(desc1.bi_vec[0].offset, 0);
        assert_eq!(desc1.bi_vec[0].blob.blob_id, "123333");
        // Read straddling the first chunk boundary: 100 bytes either side.
        let desc2 = cached_inode
            .alloc_bio_desc(1024 * 1024 - 100, 200, true)
            .unwrap();
        assert_eq!(desc2.bi_size, 200);
        assert_eq!(desc2.bi_vec.len(), 2);
        assert_eq!(desc2.bi_vec[0].offset, 1024 * 1024 - 100);
        assert_eq!(desc2.bi_vec[0].size, 100);
        assert_eq!(desc2.bi_vec[1].offset, 0);
        assert_eq!(desc2.bi_vec[1].size, 100);
        // Over-long read is clipped at EOF.
        let desc3 = cached_inode
            .alloc_bio_desc(1024 * 1024 + 8192, 1024 * 1024 * 4, true)
            .unwrap();
        assert_eq!(desc3.bi_size, 1024 * 1024 * 2);
        assert_eq!(desc3.bi_vec.len(), 3);
        assert_eq!(desc3.bi_vec[2].size, 8192);
        drop(f);
        std::fs::remove_file("/tmp/buf_3").unwrap();
    }
}
| 32.788647 | 96 | 0.58039 |
91b895c8d2f011fc1e486c6caf51c44d8f9c7408 | 3,486 | use crate::components::{Focus, FontFace, NeedsPaint, OnInput, OnPaint, Parent, Position, Text};
use crate::widgets::WidgetSystem;
use cairo::Cairo;
use graphics_base::system::System;
use graphics_base::types::{EventInput, MouseButton};
use graphics_base::Result;
use hecs::{Component, Entity, RefMut, World};
/* fn find_parent<Q>(world: &World, entity: Entity) -> Option<Ref<Q>> where Q: Component {
world.get::<Q>(entity).ok().or_else(||{
let Parent(parent) = *world.get::<Parent>(entity).ok()?;
find_parent(world, parent)
})
} */
/// Walks up the `Parent` chain starting at `entity` and returns a mutable
/// borrow of the first component of type `Q` found, checking the entity
/// itself first.
///
/// Returns `None` when neither the entity nor any ancestor carries a `Q`
/// (the walk also stops at the first entity without a `Parent` component).
fn find_parent_mut<Q>(world: &World, entity: Entity) -> Option<RefMut<Q>>
where
    Q: Component,
{
    world.get_mut::<Q>(entity).ok().or_else(|| {
        let Parent(parent) = *world.get::<Parent>(entity).ok()?;
        find_parent_mut(world, parent)
    })
}
/// Marker component identifying an entity as a text-box widget.
pub struct TextBox;
/// System that paints text boxes and routes mouse/keyboard input to them.
/// Holds the shared paint/input callbacks that get attached to each widget.
pub struct TextBoxSystem {
    on_paint: OnPaint,
    on_input: OnInput,
}
impl TextBoxSystem {
    /// Creates the system with its paint and input handlers wired up.
    pub fn new() -> Self {
        Self {
            on_paint: OnPaint::new(Self::on_paint),
            on_input: OnInput::new(Self::on_input),
        }
    }
    /// Paints one text box: white fill, black border, then the text
    /// vertically centred, using the entity's `FontFace` if it has one.
    fn on_paint(world: &World, entity: Entity, cr: &Cairo) {
        let mut query = world
            .query_one::<(&Position, Option<(&Text, Option<&FontFace>)>)>(entity)
            .unwrap();
        cr.set_source_rgb(1.0, 1.0, 1.0).paint();
        if let Some((&Position(pos), text_and_style)) = query.get() {
            cr.set_source_rgb(0.0, 0.0, 0.0)
                .rectangle(0.0, 0.0, pos.width, pos.height)
                .stroke();
            if let Some((Text(ref text), font_face)) = text_and_style {
                if let Some(FontFace(font_face)) = font_face {
                    cr.set_font_face(&font_face);
                }
                // Centre the text vertically based on the font's line height;
                // the x offset reuses the same margin as the vertical one.
                let font_extents = cr.font_extents();
                cr.move_to(
                    (pos.height - font_extents.height) / 2.0,
                    (pos.height + font_extents.height) / 2.0,
                )
                .show_text(text);
            }
        }
    }
    /// Handles input routed to a text-box entity.
    ///
    /// Left click stores this entity into the nearest ancestor `Focus`;
    /// a key press edits the `Text` component (backspace `\x08` pops a
    /// character, anything else is appended) and schedules a repaint.
    fn on_input(world: &mut World, entity: Entity, input: EventInput) -> Result<()> {
        match input {
            EventInput::MouseButtonDown {
                button: MouseButton::Left,
                ..
            } => {
                let Focus(ref mut focus) = *find_parent_mut(world, entity).unwrap();
                *focus = Some(entity);
            }
            EventInput::KeyPress { code } => {
                // Inner scope: the `RefMut<Text>` borrow must end before
                // `insert_one(NeedsPaint)` mutates the world again.
                {
                    // Lazily create an empty `Text` component on first use;
                    // the loop retries the borrow after inserting it.
                    let mut text = loop {
                        if let Ok(text) = world.get_mut(entity) {
                            break text;
                        }
                        world.insert_one(entity, Text::new("")).unwrap();
                    };
                    let Text(ref mut text) = &mut *text;
                    if code == '\x08' {
                        text.pop();
                    } else {
                        text.push(code);
                    }
                }
                world.insert_one(entity, NeedsPaint).unwrap();
            }
            _ => (),
        }
        Ok(())
    }
}
impl WidgetSystem for TextBoxSystem {
    type Widget = TextBox;
    type Components = (OnPaint, OnInput);
    /// Components attached to every freshly created `TextBox` widget.
    fn components(&self) -> Self::Components {
        (self.on_paint.clone(), self.on_input.clone())
    }
}
/// Adapter so the widget system can run as an ordinary ECS `System`.
impl System for TextBoxSystem {
    fn run(&mut self, world: &mut World) -> Result<()> {
        WidgetSystem::run(self, world)
    }
}
| 29.294118 | 95 | 0.502582 |
d767347a3d1b6f1c7819379efd7696b6588b8e7e | 2,217 | // Copyright 2013-2017 The CGMath Developers. For a full listing of the authors,
// refer to the Cargo.toml file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![cfg(feature = "swizzle")]
extern crate cgmath;
use cgmath::{Point1, Point2, Point3, Vector1, Vector2, Vector3, Vector4};
// Sanity checks
#[test]
fn test_point_swizzle() {
    // Spot-check representative point swizzles across 1/2/3 dimensions.
    let one = Point1::new(1.0);
    let two = Point2::new(1.0, 2.0);
    let three = Point3::new(1.0, 2.0, 3.0);
    assert_eq!(one.x(), one);
    assert_eq!(two.x(), one);
    assert_eq!(two.y(), Point1::new(2.0));
    assert_eq!(two.xx(), Point2::new(1.0, 1.0));
    assert_eq!(two.xy(), two);
    assert_eq!(two.yx(), Point2::new(2.0, 1.0));
    assert_eq!(two.yy(), Point2::new(2.0, 2.0));
    assert_eq!(three.x(), one);
    assert_eq!(three.y(), Point1::new(2.0));
    assert_eq!(three.xy(), two);
    assert_eq!(three.zy(), Point2::new(3.0, 2.0));
    assert_eq!(three.yyx(), Point3::new(2.0, 2.0, 1.0));
}
#[test]
fn test_vector_swizzle() {
    // Spot-check representative vector swizzles across 1/2/3/4 dimensions.
    let p1 = Vector1::new(1.0);
    let p2 = Vector2::new(1.0, 2.0);
    let p3 = Vector3::new(1.0, 2.0, 3.0);
    let p4 = Vector4::new(1.0, 2.0, 3.0, 4.0);
    assert_eq!(p1.x(), p1);
    assert_eq!(p2.x(), p1);
    assert_eq!(p2.y(), Vector1::new(2.0));
    assert_eq!(p2.xx(), Vector2::new(1.0, 1.0));
    assert_eq!(p2.xy(), p2);
    assert_eq!(p2.yx(), Vector2::new(2.0, 1.0));
    assert_eq!(p2.yy(), Vector2::new(2.0, 2.0));
    assert_eq!(p3.x(), p1);
    assert_eq!(p3.y(), Vector1::new(2.0));
    assert_eq!(p3.xy(), p2);
    assert_eq!(p3.zy(), Vector2::new(3.0, 2.0));
    assert_eq!(p3.yyx(), Vector3::new(2.0, 2.0, 1.0));
    assert_eq!(p4.xyxy(), Vector4::new(1.0, 2.0, 1.0, 2.0));
}
| 35.758065 | 80 | 0.61705 |
87adfff335e24d228d1dff3d96b8a2a7ecc01a14 | 22,706 | use crate::prelude::*;
#[cfg(any(feature = "dtype-date32", feature = "dtype-date64"))]
use crate::chunked_array::temporal::{date32_as_datetime, date64_as_datetime};
use num::{Num, NumCast};
use std::{
fmt,
fmt::{Debug, Display, Formatter},
};
/// Default maximum number of elements printed when debug-formatting a
/// `Series`/`ChunkedArray` (longer arrays show head, "...", and tail).
const LIMIT: usize = 25;
#[cfg(feature = "pretty_fmt")]
use comfy_table::presets::{ASCII_FULL, UTF8_FULL};
#[cfg(feature = "pretty_fmt")]
use comfy_table::*;
#[cfg(all(feature = "plain_fmt", not(feature = "pretty_fmt")))]
use prettytable::{Cell, Row, Table};
/// Some unit functions that just pass the integer values if we don't want all chrono functionality
#[cfg(not(feature = "temporal"))]
mod temporal {
    // Minimal stand-in for chrono's DateTime: just wraps the raw integer.
    pub struct DateTime<T>(T)
    where
        T: Copy;
    impl<T> DateTime<T>
    where
        T: Copy,
    {
        // Mirrors chrono's `date()` accessor; returns the raw value.
        pub fn date(&self) -> T {
            self.0
        }
    }
    pub fn date32_as_datetime(v: i32) -> DateTime<i32> {
        DateTime(v)
    }
    pub fn date64_as_datetime(v: i64) -> DateTime<i64> {
        DateTime(v)
    }
    // The remaining helpers are identity functions matching the signatures
    // of their chrono-backed counterparts in the "temporal" feature build.
    pub fn time32_millisecond_as_time(v: i32) -> i32 {
        v
    }
    pub fn time32_second_as_time(v: i32) -> i32 {
        v
    }
    pub fn time64_nanosecond_as_time(v: i64) -> i64 {
        v
    }
    pub fn time64_microsecond_as_time(v: i64) -> i64 {
        v
    }
    pub fn timestamp_nanoseconds_as_datetime(v: i64) -> i64 {
        v
    }
    pub fn timestamp_microseconds_as_datetime(v: i64) -> i64 {
        v
    }
    pub fn timestamp_milliseconds_as_datetime(v: i64) -> i64 {
        v
    }
    pub fn timestamp_seconds_as_datetime(v: i64) -> i64 {
        v
    }
}
#[cfg(any(feature = "plain_fmt", feature = "pretty_fmt"))]
use std::borrow::Cow;
#[cfg(not(feature = "temporal"))]
use temporal::*;
// Prints the standard array header ("shape", type, name, dtype) followed by
// up to `$limit` values, one per line. Longer arrays show the first and last
// `$limit / 2` values around a "..." marker. Utf8 values are additionally
// clipped to 15 characters (with a "..." suffix) on UTF-8 char boundaries.
macro_rules! format_array {
    ($limit:expr, $f:ident, $a:expr, $dtype:expr, $name:expr, $array_type:expr) => {{
        write!(
            $f,
            "shape: ({},)\n{}: '{}' [{}]\n[\n",
            $a.len(),
            $array_type,
            $name,
            $dtype
        )?;
        let truncate = matches!($a.dtype(), DataType::Utf8);
        let limit = std::cmp::min($limit, $a.len());
        let write = |v, f: &mut Formatter| {
            if truncate {
                let v = format!("{}", v);
                // Byte index just past the 15th char, so multi-byte
                // characters are never split.
                let v_trunc = &v[..v
                    .char_indices()
                    .take(15)
                    .last()
                    .map(|(i, c)| i + c.len_utf8())
                    .unwrap_or(0)];
                if v == v_trunc {
                    write!(f, "\t{}\n", v)?;
                } else {
                    write!(f, "\t{}...\n", v_trunc)?;
                }
            } else {
                write!(f, "\t{}\n", v)?;
            };
            Ok(())
        };
        if limit < $a.len() {
            for i in 0..limit / 2 {
                let v = $a.get_any_value(i);
                write(v, $f)?;
            }
            write!($f, "\t...\n")?;
            for i in (0..limit / 2).rev() {
                let v = $a.get_any_value($a.len() - i - 1);
                write(v, $f)?;
            }
        } else {
            for i in 0..limit {
                let v = $a.get_any_value(i);
                write(v, $f)?;
            }
        }
        write!($f, "]")
    }};
}
#[cfg(feature = "object")]
/// Formats an object-dtype series: header with the inner type name, then the
/// first `limit` values via `str_value` (no head/tail elision here).
///
/// # Panics
/// Unreachable for any non-`Object` dtype; callers must pre-check the dtype.
fn format_object_array(
    limit: usize,
    f: &mut Formatter<'_>,
    object: &dyn SeriesTrait,
    name: &str,
    array_type: &str,
) -> fmt::Result {
    match object.dtype() {
        DataType::Object(inner_type) => {
            write!(
                f,
                "shape: ({},)\n{}: '{}' [o][{}]\n[\n",
                object.len(),
                array_type,
                name,
                inner_type
            )?;
            for i in 0..limit {
                let v = object.str_value(i);
                writeln!(f, "\t{}", v)?;
            }
            write!(f, "]")
        }
        _ => unreachable!(),
    }
}
// Clamps the number of printed elements to the module-wide `LIMIT`.
macro_rules! set_limit {
    ($self:ident) => {
        std::cmp::min($self.len(), LIMIT)
    };
}
impl<T> Debug for ChunkedArray<T>
where
    T: PolarsNumericType,
{
    /// Debug output for numeric arrays; the dtype label is derived from the
    /// type parameter's `get_dtype()`.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let limit = set_limit!(self);
        let dtype = format!("{:?}", T::get_dtype());
        format_array!(limit, f, self, dtype, self.name(), "ChunkedArray")
    }
}
impl Debug for ChunkedArray<BooleanType> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let limit = set_limit!(self);
        format_array!(limit, f, self, "bool", self.name(), "ChunkedArray")
    }
}
impl Debug for Utf8Chunked {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Strings use a larger element limit (80) than the numeric LIMIT;
        // individual values are still clipped to 15 chars by format_array!.
        format_array!(80, f, self, "str", self.name(), "ChunkedArray")
    }
}
impl Debug for ListChunked {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let limit = set_limit!(self);
        format_array!(limit, f, self, "list", self.name(), "ChunkedArray")
    }
}
impl Debug for CategoricalChunked {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let limit = set_limit!(self);
        format_array!(limit, f, self, "cat", self.name(), "ChunkedArray")
    }
}
#[cfg(feature = "object")]
impl<T> Debug for ObjectChunked<T>
where
    T: PolarsObject,
{
    /// Debug output for object arrays, mirroring `format_array!`'s head /
    /// "..." / tail layout but going through `take_rand` because object
    /// values have no `AnyValue` representation.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let limit = set_limit!(self);
        let taker = self.take_rand();
        let inner_type = T::type_name();
        write!(
            f,
            "ChunkedArray: '{}' [o][{}]\n[\n",
            self.name(),
            inner_type
        )?;
        if limit < self.len() {
            for i in 0..limit / 2 {
                match taker.get(i) {
                    None => writeln!(f, "\tnull")?,
                    Some(val) => writeln!(f, "\t{}", val)?,
                };
            }
            writeln!(f, "\t...")?;
            for i in (0..limit / 2).rev() {
                match taker.get(self.len() - i - 1) {
                    None => writeln!(f, "\tnull")?,
                    Some(val) => writeln!(f, "\t{}", val)?,
                };
            }
        } else {
            for i in 0..limit {
                match taker.get(i) {
                    None => writeln!(f, "\tnull")?,
                    Some(val) => writeln!(f, "\t{}", val)?,
                };
            }
        }
        Ok(())
    }
}
impl Debug for Series {
    /// Debug-formats the series by dispatching on its dtype to the matching
    /// typed accessor and printing up to `LIMIT` values via `format_array!`.
    ///
    /// # Panics
    /// Panics on dtypes that have no formatting arm yet.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let limit = set_limit!(self);
        match self.dtype() {
            DataType::Boolean => format_array!(
                limit,
                f,
                self.bool().unwrap(),
                "bool",
                self.name(),
                "Series"
            ),
            DataType::Utf8 => {
                format_array!(limit, f, self.utf8().unwrap(), "str", self.name(), "Series")
            }
            DataType::UInt8 => {
                format_array!(limit, f, self.u8().unwrap(), "u8", self.name(), "Series")
            }
            DataType::UInt16 => {
                // Bug fix: dtype label was misspelled "u6".
                format_array!(limit, f, self.u16().unwrap(), "u16", self.name(), "Series")
            }
            DataType::UInt32 => {
                format_array!(limit, f, self.u32().unwrap(), "u32", self.name(), "Series")
            }
            DataType::UInt64 => {
                format_array!(limit, f, self.u64().unwrap(), "u64", self.name(), "Series")
            }
            DataType::Int8 => {
                format_array!(limit, f, self.i8().unwrap(), "i8", self.name(), "Series")
            }
            DataType::Int16 => {
                format_array!(limit, f, self.i16().unwrap(), "i16", self.name(), "Series")
            }
            DataType::Int32 => {
                format_array!(limit, f, self.i32().unwrap(), "i32", self.name(), "Series")
            }
            DataType::Int64 => {
                format_array!(limit, f, self.i64().unwrap(), "i64", self.name(), "Series")
            }
            DataType::Float32 => {
                format_array!(limit, f, self.f32().unwrap(), "f32", self.name(), "Series")
            }
            DataType::Float64 => {
                format_array!(limit, f, self.f64().unwrap(), "f64", self.name(), "Series")
            }
            DataType::Date32 => format_array!(
                limit,
                f,
                self.date32().unwrap(),
                "date32",
                self.name(),
                "Series"
            ),
            DataType::Date64 => format_array!(
                limit,
                f,
                self.date64().unwrap(),
                "date64",
                self.name(),
                "Series"
            ),
            DataType::List(_) => format_array!(
                limit,
                f,
                self.list().unwrap(),
                "list",
                self.name(),
                "Series"
            ),
            #[cfg(feature = "object")]
            DataType::Object(_) => {
                format_object_array(limit, f, self.as_ref(), self.name(), "Series")
            }
            DataType::Categorical => format_array!(
                limit,
                f,
                self.categorical().unwrap(),
                "cat",
                self.name(),
                "Series"
            ),
            dt => panic!("{:?} not impl", dt),
        }
    }
}
impl Display for Series {
    /// `Display` output is identical to `Debug` output for a series.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        Debug::fmt(self, f)
    }
}
impl Debug for DataFrame {
    /// `Debug` output is identical to `Display` output for a data frame
    /// (the inverse delegation of `Series`).
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        Display::fmt(self, f)
    }
}
#[cfg(any(feature = "plain_fmt", feature = "pretty_fmt"))]
/// Converts one logical row of cell values into display strings.
///
/// Keeps the first `n_first` and last `n_last` cells; when that elides any
/// middle columns, a single literal `"..."` cell is inserted between the two
/// groups. Every kept cell is clipped to at most 32 characters, with a
/// trailing `"..."` marker when clipping occurred.
fn prepare_row(row: Vec<Cow<'_, str>>, n_first: usize, n_last: usize) -> Vec<String> {
    // Clips a single cell to at most 32 characters, appending "..." if cut.
    fn clip(cell: &str) -> String {
        const MAX_CHARS: usize = 32;
        // Byte index just past the 32nd character (0 for an empty string),
        // computed on char boundaries so multi-byte UTF-8 is never split.
        let cut = cell
            .char_indices()
            .take(MAX_CHARS)
            .last()
            .map(|(i, c)| i + c.len_utf8())
            .unwrap_or(0);
        if cut == cell.len() {
            cell.to_string()
        } else {
            format!("{}...", &cell[..cut])
        }
    }
    let elided = n_first + n_last < row.len();
    let mut cells = Vec::with_capacity(n_first + n_last + elided as usize);
    cells.extend(row[..n_first].iter().map(|c| clip(c)));
    if elided {
        cells.push("...".to_string());
    }
    cells.extend(row[row.len() - n_last..].iter().map(|c| clip(c)));
    cells
}
impl Display for DataFrame {
    /// Renders the frame as a table, eliding columns beyond
    /// `POLARS_FMT_MAX_COLS` (default 8) and rows beyond
    /// `POLARS_FMT_MAX_ROWS` (default 8) with "..." markers.
    ///
    /// # Panics
    /// Panics if the columns have unequal lengths (a corrupt frame).
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let height = self.height();
        if !self.columns.iter().all(|s| s.len() == height) {
            panic!("The columns lengths in the DataFrame are not equal.");
        }
        let max_n_cols = std::env::var("POLARS_FMT_MAX_COLS")
            .unwrap_or_else(|_| "8".to_string())
            .parse()
            .unwrap_or(8);
        #[cfg(any(feature = "plain_fmt", feature = "pretty_fmt"))]
        let max_n_rows = std::env::var("POLARS_FMT_MAX_ROWS")
            .unwrap_or_else(|_| "8".to_string())
            .parse()
            .unwrap_or(8);
        // Keep (max+1)/2 leading and max/2 trailing columns when eliding.
        let (n_first, n_last) = if self.width() > max_n_cols {
            ((max_n_cols + 1) / 2, max_n_cols / 2)
        } else {
            (self.width(), 0)
        };
        let reduce_columns = n_first + n_last < self.width();
        // Header cell: "name\n---\ndtype".
        let field_to_str = |f: &Field| format!("{}\n---\n{}", f.name(), f.data_type());
        let mut names = Vec::with_capacity(n_first + n_last + reduce_columns as usize);
        let schema = self.schema();
        let fields = schema.fields();
        for field in fields[0..n_first].iter() {
            names.push(field_to_str(field))
        }
        if reduce_columns {
            names.push("...".to_string())
        }
        for field in fields[self.width() - n_last..].iter() {
            names.push(field_to_str(field))
        }
        // Pretty path: comfy-table with UTF-8 (or ASCII) borders.
        #[cfg(feature = "pretty_fmt")]
        {
            let mut table = Table::new();
            let preset = if std::env::var("POLARS_FMT_NO_UTF8").is_ok() {
                ASCII_FULL
            } else {
                UTF8_FULL
            };
            table
                .load_preset(preset)
                .set_content_arrangement(ContentArrangement::Dynamic)
                .set_table_width(
                    std::env::var("POLARS_TABLE_WIDTH")
                        .map(|s| {
                            s.parse::<u16>()
                                .expect("could not parse table width argument")
                        })
                        .unwrap_or(100),
                )
                .set_header(names);
            let mut rows = Vec::with_capacity(max_n_rows);
            if self.height() > max_n_rows {
                // Head rows, a "..." row, then tail rows.
                for i in 0..(max_n_rows / 2) {
                    let row = self.columns.iter().map(|s| s.str_value(i)).collect();
                    rows.push(prepare_row(row, n_first, n_last));
                }
                let dots = rows[0].iter().map(|_| "...".to_string()).collect();
                rows.push(dots);
                for i in (self.height() - max_n_rows / 2 - 1)..self.height() {
                    let row = self.columns.iter().map(|s| s.str_value(i)).collect();
                    rows.push(prepare_row(row, n_first, n_last));
                }
                for row in rows {
                    table.add_row(row);
                }
            } else {
                for i in 0..max_n_rows {
                    if i < self.height() && self.width() > 0 {
                        let row = self.columns.iter().map(|s| s.str_value(i)).collect();
                        table.add_row(prepare_row(row, n_first, n_last));
                    } else {
                        break;
                    }
                }
            }
            write!(f, "shape: {:?}\n{}", self.shape(), table)?;
        }
        // No formatting feature enabled: shape only, with a hint.
        #[cfg(not(any(feature = "plain_fmt", feature = "pretty_fmt")))]
        {
            write!(
                f,
                "shape: {:?}\nto see more, compile with 'plain_fmt' or 'pretty_fmt' feature",
                self.shape()
            )?;
        }
        // Plain path: prettytable, same row-selection logic as above.
        #[cfg(all(feature = "plain_fmt", not(feature = "pretty_fmt")))]
        {
            let mut table = Table::new();
            table.set_titles(Row::new(names.into_iter().map(|s| Cell::new(&s)).collect()));
            let mut rows = Vec::with_capacity(max_n_rows);
            if self.height() > max_n_rows {
                for i in 0..(max_n_rows / 2) {
                    let row = self.columns.iter().map(|s| s.str_value(i)).collect();
                    rows.push(prepare_row(row, n_first, n_last));
                }
                let dots = rows[0].iter().map(|_| "...".to_string()).collect();
                rows.push(dots);
                for i in (self.height() - max_n_rows / 2 - 1)..self.height() {
                    let row = self.columns.iter().map(|s| s.str_value(i)).collect();
                    rows.push(prepare_row(row, n_first, n_last));
                }
                for row in rows {
                    table.add_row(Row::new(row.into_iter().map(|s| Cell::new(&s)).collect()));
                }
            } else {
                for i in 0..max_n_rows {
                    if i < self.height() && self.width() > 0 {
                        let row = self.columns.iter().map(|s| s.str_value(i)).collect();
                        table.add_row(Row::new(
                            prepare_row(row, n_first, n_last)
                                .into_iter()
                                .map(|s| Cell::new(&s))
                                .collect(),
                        ));
                    } else {
                        break;
                    }
                }
            }
            write!(f, "shape: {:?}\n{}", self.shape(), table)?;
        }
        Ok(())
    }
}
/// Right-aligns an integer value within `width` columns.
fn fmt_integer<T: Num + NumCast + Display>(
    f: &mut Formatter<'_>,
    width: usize,
    v: T,
) -> fmt::Result {
    write!(f, "{:>width$}", v, width = width)
}
/// Right-aligns a float: exact zero prints as "0.0"; values outside
/// [0.0001, 9999] print in scientific notation; everything else as-is.
///
/// NOTE(review): the range test means all negative values take the
/// scientific-notation branch — confirm that is intended.
fn fmt_float<T: Num + NumCast>(f: &mut Formatter<'_>, width: usize, v: T) -> fmt::Result {
    let v: f64 = NumCast::from(v).unwrap();
    if v == 0.0 {
        write!(f, "{:>width$.1}", v, width = width)
    } else if !(0.0001..=9999.).contains(&v) {
        write!(f, "{:>width$e}", v, width = width)
    } else {
        write!(f, "{:>width$}", v, width = width)
    }
}
impl Display for AnyValue<'_> {
    /// Formats a single scalar value for table/list display: nulls as
    /// "null", strings quoted, dates via the temporal helpers, lists via
    /// their series' `fmt_list`.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Width 0 means no padding here; padding is applied by callers.
        let width = 0;
        match self {
            AnyValue::Null => write!(f, "null"),
            AnyValue::UInt8(v) => write!(f, "{}", v),
            AnyValue::UInt16(v) => write!(f, "{}", v),
            AnyValue::UInt32(v) => write!(f, "{}", v),
            AnyValue::UInt64(v) => write!(f, "{}", v),
            AnyValue::Int8(v) => fmt_integer(f, width, *v),
            AnyValue::Int16(v) => fmt_integer(f, width, *v),
            AnyValue::Int32(v) => fmt_integer(f, width, *v),
            AnyValue::Int64(v) => fmt_integer(f, width, *v),
            AnyValue::Float32(v) => fmt_float(f, width, *v),
            AnyValue::Float64(v) => fmt_float(f, width, *v),
            AnyValue::Boolean(v) => write!(f, "{}", *v),
            AnyValue::Utf8(v) => write!(f, "{}", format!("\"{}\"", v)),
            #[cfg(feature = "dtype-date32")]
            AnyValue::Date32(v) => write!(f, "{}", date32_as_datetime(*v).date()),
            #[cfg(feature = "dtype-date64")]
            AnyValue::Date64(v) => write!(f, "{}", date64_as_datetime(*v)),
            AnyValue::List(s) => write!(f, "{}", s.fmt_list()),
            #[cfg(feature = "object")]
            AnyValue::Object(_) => write!(f, "object"),
        }
    }
}
// Formats an `Option<T>` for list display: the value via `Display`, or the
// literal string "null" for `None`.
macro_rules! fmt_option {
    ($opt:expr) => {{
        match $opt {
            Some(v) => format!("{}", v),
            None => "null".to_string(),
        }
    }};
}
// Renders a list-like value (anything with `len()` and `get(i) -> Option<_>`)
// as "[a, b, c]"; lists longer than three elements show the first two, an
// ellipsis, and the last element: "[a, b, ... z]".
macro_rules! impl_fmt_list {
    ($self:ident) => {{
        match $self.len() {
            // `format!` with no interpolation is just an allocation — build
            // the literal directly (clippy::useless_format).
            0 => "[]".to_string(),
            1 => format!("[{}]", fmt_option!($self.get(0))),
            2 => format!(
                "[{}, {}]",
                fmt_option!($self.get(0)),
                fmt_option!($self.get(1))
            ),
            3 => format!(
                "[{}, {}, {}]",
                fmt_option!($self.get(0)),
                fmt_option!($self.get(1)),
                fmt_option!($self.get(2))
            ),
            _ => format!(
                "[{}, {}, ... {}]",
                fmt_option!($self.get(0)),
                fmt_option!($self.get(1)),
                fmt_option!($self.get($self.len() - 1))
            ),
        }
    }};
}
/// Short single-line rendering of an array's contents, used when a list
/// value is embedded inside another table/series printout.
pub(crate) trait FmtList {
    fn fmt_list(&self) -> String;
}
impl<T> FmtList for ChunkedArray<T>
where
    T: PolarsNumericType,
    T::Native: fmt::Display,
{
    fn fmt_list(&self) -> String {
        impl_fmt_list!(self)
    }
}
impl FmtList for BooleanChunked {
    fn fmt_list(&self) -> String {
        impl_fmt_list!(self)
    }
}
impl FmtList for Utf8Chunked {
    fn fmt_list(&self) -> String {
        impl_fmt_list!(self)
    }
}
impl FmtList for ListChunked {
    fn fmt_list(&self) -> String {
        impl_fmt_list!(self)
    }
}
impl FmtList for CategoricalChunked {
    fn fmt_list(&self) -> String {
        impl_fmt_list!(self)
    }
}
#[cfg(feature = "object")]
impl<T> FmtList for ObjectChunked<T> {
    // Not implemented yet: panics via todo!() if an object list is printed.
    fn fmt_list(&self) -> String {
        todo!()
    }
}
#[cfg(all(
    test,
    feature = "temporal",
    feature = "dtype-date32",
    feature = "dtype-date64"
))]
mod test {
    // Golden-output tests: the expected strings embed literal tab characters
    // (format_array! indents every value with "\t"), so keep the raw-string
    // contents byte-exact when editing.
    use crate::prelude::*;
    #[test]
    fn test_fmt_list() {
        let mut builder = ListPrimitiveChunkedBuilder::<Int32Type>::new("a", 10, 10);
        builder.append_slice(Some(&[1, 2, 3]));
        builder.append_slice(None);
        let list = builder.finish().into_series();
        println!("{:?}", list);
        assert_eq!(
            r#"shape: (2,)
Series: 'a' [list]
[
	[1, 2, 3]
	null
]"#,
            format!("{:?}", list)
        );
    }
    #[test]
    #[cfg(feature = "dtype-time64-ns")]
    fn test_fmt_temporal() {
        let s = Date32Chunked::new_from_opt_slice("date32", &[Some(1), None, Some(3)]);
        assert_eq!(
            r#"shape: (3,)
Series: 'date32' [date32]
[
	1970-01-02
	null
	1970-01-04
]"#,
            format!("{:?}", s.into_series())
        );
        let s = Date64Chunked::new_from_opt_slice("", &[Some(1), None, Some(1_000_000_000_000)]);
        assert_eq!(
            r#"shape: (3,)
Series: '' [date64]
[
	1970-01-01 00:00:00.001
	null
	2001-09-09 01:46:40
]"#,
            format!("{:?}", s.into_series())
        );
        let s = Time64NanosecondChunked::new_from_slice(
            "",
            &[1_000_000, 37_800_005_000_000, 86_399_210_000_000],
        );
        assert_eq!(
            r#"shape: (3,)
Series: '' [time64(ns)]
[
	00:00:00.001
	10:30:00.005
	23:59:59.210
]"#,
            format!("{:?}", s.into_series())
        )
    }
    #[test]
    fn test_fmt_chunkedarray() {
        let ca = Int32Chunked::new_from_opt_slice("date32", &[Some(1), None, Some(3)]);
        println!("{:?}", ca);
        assert_eq!(
            r#"shape: (3,)
ChunkedArray: 'date32' [Int32]
[
	1
	null
	3
]"#,
            format!("{:?}", ca)
        );
        let ca = Utf8Chunked::new_from_slice("name", &["a", "b"]);
        println!("{:?}", ca);
        assert_eq!(
            r#"shape: (2,)
ChunkedArray: 'name' [str]
[
	"a"
	"b"
]"#,
            format!("{:?}", ca)
        );
    }
    #[test]
    fn test_fmt_series() {
        // Long strings are clipped at 15 chars with a "..." suffix.
        let s = Series::new("foo", &["Somelongstringto eeat wit me oundaf"]);
        dbg!(&s);
        assert_eq!(
            r#"shape: (1,)
Series: 'foo' [str]
[
	"Somelongstring...
]"#,
            format!("{:?}", s)
        );
        // Clipping counts chars, not bytes: multi-byte emoji stay intact.
        let s = Series::new("foo", &["😀😁😂😃😄😅😆😇😈😉😊😋😌😎😏😐😑😒😓"]);
        dbg!(&s);
        assert_eq!(
            r#"shape: (1,)
Series: 'foo' [str]
[
	"😀😁😂😃😄😅😆😇😈😉😊😋😌😎...
]"#,
            format!("{:?}", s)
        );
        let s = Series::new("foo", &["yzäöüäöüäöüäö"]);
        dbg!(&s);
        assert_eq!(
            r#"shape: (1,)
Series: 'foo' [str]
[
	"yzäöüäöüäöüäö"
]"#,
            format!("{:?}", s)
        );
        // Arrays beyond LIMIT show head, "...", and tail.
        let s = Series::new("foo", (0..100).collect::<Vec<_>>());
        dbg!(&s);
        assert_eq!(
            r#"shape: (100,)
Series: 'foo' [i32]
[
	0
	1
	2
	3
	4
	5
	6
	7
	8
	9
	10
	11
	...
	88
	89
	90
	91
	92
	93
	94
	95
	96
	97
	98
	99
]"#,
            format!("{:?}", s)
        );
    }
}
| 28.171216 | 99 | 0.446182 |
6a83d75c94c211e51a618a4c47a35006deefa42e | 5,040 | use crate::boards::idx_coord::BoardCoord;
use crate::boards::{board_cols, board_rows};
use crate::constants::colors::{
DARK_GRAY, DARK_GREEN, FAINT_BLUE, FAINT_RED, LIGHT_BLUE, LIGHT_GRAY, RED, WHITE,
};
use crate::system::letter_mesh::make_letter_mesh;
use crate::system::math::{pt, Offset, OffsetTuple, Point};
use crate::system::mesh_helper::MeshHelper;
use crate::system::PlayState::ModeSelection;
use crate::system::TurnState::{SelectingMove, SelectingPiece};
use crate::tablut::render_mode_selection::render_mode_selection;
use crate::tablut::{Move, Square, State};
use ggez::graphics::DrawMode;
use ggez::{Context, GameResult};
/// Top-level draw entry point for the tablut screen.
///
/// Shows the mode-selection menu until a mode has been chosen; afterwards
/// draws the game board itself.
pub(super) fn render(
    ctx: &mut Context,
    mesh_helper: &mut MeshHelper,
    state: &State,
) -> GameResult<()> {
    if state.play_state == ModeSelection {
        return render_mode_selection(ctx, mesh_helper, state);
    }
    render_game(ctx, mesh_helper, state)
}
/// Draws the tablut board, pieces, cursor, and any move/capture hints.
fn render_game(ctx: &mut Context, mesh_helper: &mut MeshHelper, state: &State) -> GameResult<()> {
    // Cell edge is 9% of window height; the board starts half its own width
    // in from the left and one cell down from the top.
    let cell_size = mesh_helper.calc_height(0.09);
    let board_start = pt(cell_size * board_cols() as f32 * 0.5, cell_size);
    let grid = mesh_helper.make_grid(
        ctx,
        cell_size * board_cols() as f32,
        cell_size * board_rows() as f32,
        board_cols(),
        board_rows(),
        2.,
        LIGHT_GRAY,
        None,
    )?;
    let rect = mesh_helper.make_rect(
        ctx,
        cell_size * board_cols() as f32,
        cell_size * board_rows() as f32,
        DrawMode::stroke(2.),
    )?;
    let cell = mesh_helper.make_rect(ctx, cell_size, cell_size, DrawMode::fill())?;
    mesh_helper.draw_mesh(ctx, grid.as_ref(), board_start);
    mesh_helper.draw_mesh(ctx, rect.as_ref(), board_start);
    // The four corner squares are highlighted green (escape squares,
    // presumably — TODO confirm against the game rules module).
    mesh_helper.draw_coloured_mesh(ctx, cell.as_ref(), board_start, DARK_GREEN);
    mesh_helper.draw_coloured_mesh(
        ctx,
        cell.as_ref(),
        board_start.offset(cell_size * (board_cols() as f32 - 1.), 0.),
        DARK_GREEN,
    );
    mesh_helper.draw_coloured_mesh(
        ctx,
        cell.as_ref(),
        board_start.offset(
            cell_size * (board_cols() as f32 - 1.),
            cell_size * (board_cols() as f32 - 1.),
        ),
        DARK_GREEN,
    );
    mesh_helper.draw_coloured_mesh(
        ctx,
        cell.as_ref(),
        board_start.offset(0., cell_size * (board_cols() as f32 - 1.)),
        DARK_GREEN,
    );
    // Centre square (4,4) in dark gray — the throne on a 9x9 board.
    mesh_helper.draw_coloured_mesh(
        ctx,
        cell.as_ref(),
        board_start.offset(cell_size * 4., cell_size * 4.),
        DARK_GRAY,
    );
    // Letter meshes: 'a' attacker, 'd' defender, 'k' king.
    let a = make_letter_mesh(ctx, mesh_helper, cell_size, 'a')?;
    let d = make_letter_mesh(ctx, mesh_helper, cell_size, 'd')?;
    let k = make_letter_mesh(ctx, mesh_helper, cell_size, 'k')?;
    state.board.iter().enumerate().for_each(|(idx, square)| {
        // Map the flat board index to a pixel position.
        let mut pt: Point = BoardCoord::from(idx).into();
        pt = pt.multiply(cell_size, cell_size).offset_point(board_start);
        let mesh = match square {
            Square::Empty => None,
            Square::King => Some((k.as_ref(), FAINT_BLUE)),
            Square::Defender => Some((d.as_ref(), FAINT_BLUE)),
            Square::Attacker => Some((a.as_ref(), FAINT_RED)),
        };
        if let Some((mesh, color)) = mesh {
            mesh_helper.draw_coloured_mesh(ctx, mesh, pt, color);
        }
    });
    // Overlay depends on the turn phase: choosing a piece shows all of its
    // legal moves; choosing a move highlights just the selected one.
    if state.play_state.is_human(SelectingPiece) {
        state
            .cursor
            .render(ctx, mesh_helper, board_start, cell_size)?;
        for mov in state.get_moves_for_selected_piece() {
            draw_move(ctx, mesh_helper, cell_size, board_start, &mov, false)?;
        }
    } else if state.play_state.is_either(SelectingMove) {
        state
            .cursor
            .render_dark(ctx, mesh_helper, board_start, cell_size)?;
        draw_move(
            ctx,
            mesh_helper,
            cell_size,
            board_start,
            &state.get_selected_move(),
            true,
        )?;
    }
    Ok(())
}
/// Draws a single candidate move: a small stroked circle on the destination
/// square, plus a red 'x' on every square the move would capture.
///
/// `highlight` selects the circle colour: light blue for the currently
/// selected move, white for an ordinary candidate.
fn draw_move(
    ctx: &mut Context,
    mesh_helper: &mut MeshHelper,
    cell_size: f32,
    board_start: Point,
    mov: &Move,
    highlight: bool,
) -> GameResult<()> {
    let move_mesh =
        mesh_helper.make_circle(ctx, cell_size, cell_size * 0.1, DrawMode::stroke(1.))?;
    let capture_mesh = make_letter_mesh(ctx, mesh_helper, cell_size * 0.3, 'x')?;
    mesh_helper.draw_coloured_mesh(
        ctx,
        move_mesh.as_ref(),
        Point::from(BoardCoord::from(mov.dest))
            .multiply(cell_size, cell_size)
            .offset_point(board_start),
        if highlight { LIGHT_BLUE } else { WHITE },
    );
    for capture in &mov.capturing {
        // The 'x' glyph is 30% of a cell, so offset it by 35% on each axis
        // to centre it within the captured square.
        mesh_helper.draw_coloured_mesh(
            ctx,
            capture_mesh.as_ref(),
            Point::from(BoardCoord::from(*capture))
                .multiply(cell_size, cell_size)
                .offset_point(board_start)
                .offset(cell_size * 0.35, cell_size * 0.35),
            RED,
        );
    }
    Ok(())
}
| 32.101911 | 98 | 0.600992 |
1666fade8b9b0ab09b5b1afe784c04f9a87fdd85 | 9,880 | use std::prelude::v1::*;
use std::cmp::min;
use std::fmt::{self, Debug};
use std::io::{Result as IoResult, Seek, SeekFrom, Write};
use std::ptr;
// taken from pcompress implementation
// https://github.com/moinakg/pcompress
// PRIME is the Rabin hash multiplier; MASK keeps the hash within 40 bits.
const PRIME: u64 = 153_191u64;
const MASK: u64 = 0x00ff_ffff_ffffu64;
const MIN_SIZE: usize = 16 * 1024; // minimal chunk size, 16k
const AVG_SIZE: usize = 32 * 1024; // average chunk size, 32k
const MAX_SIZE: usize = 64 * 1024; // maximum chunk size, 64k
// Irreducible polynomial for Rabin modulus, from pcompress
const FP_POLY: u64 = 0xbfe6_b8a5_bf37_8d83u64;
// since we will skip MIN_SIZE when sliding window, it only
// needs to target (AVG_SIZE - MIN_SIZE) cut length,
// note the (AVG_SIZE - MIN_SIZE) must be 2^n
const CUT_MASK: u64 = (AVG_SIZE - MIN_SIZE - 1) as u64;
// rolling hash window constants
const WIN_SIZE: usize = 16; // must be 2^n
const WIN_MASK: usize = WIN_SIZE - 1; // cheap modulo for the circular window
// The window starts sliding WIN_SLIDE_OFFSET bytes before the earliest
// possible cut point so the rolling hash is warm when MIN_SIZE is reached.
const WIN_SLIDE_OFFSET: usize = 64;
const WIN_SLIDE_POS: usize = MIN_SIZE - WIN_SLIDE_OFFSET;
// writer buffer length
const WTR_BUF_LEN: usize = 8 * MAX_SIZE;
/// Pre-calculated chunker parameters.
///
/// Holds the Rabin rolling-hash tables computed once in
/// `ChunkerParams::new` and shared by every `Chunker`.
#[derive(Clone, Deserialize, Serialize)]
pub struct ChunkerParams {
    poly_pow: u64,     // poly power, PRIME ^ WIN_SIZE (masked)
    out_map: Vec<u64>, // pre-computed out byte map, length is 256
    ir: Vec<u64>,      // irreducible polynomial, length is 256
}
impl ChunkerParams {
    /// Pre-computes the rolling-hash lookup tables used by the chunker.
    pub fn new() -> Self {
        let mut params = ChunkerParams::default();

        // poly_pow is PRIME ^ WIN_SIZE, kept inside the hash mask.
        params.poly_pow = (0..WIN_SIZE).fold(1u64, |acc, _| (acc * PRIME) & MASK);

        // Fill the out-byte table and the irreducible polynomial table for
        // every possible byte value (ported from the pcompress implementation).
        for byte in 0..256 {
            params.out_map[byte] = (byte as u64 * params.poly_pow) & MASK;

            let mut term = 1u64;
            let mut pow = 1u64;
            let mut val = 1u64;
            for _ in 0..WIN_SIZE {
                if (term & FP_POLY) != 0 {
                    val += (pow * byte as u64) & MASK;
                }
                pow = (pow * PRIME) & MASK;
                term *= 2;
            }
            params.ir[byte] = val;
        }
        params
    }
}
impl Debug for ChunkerParams {
    // The lookup tables are large and uninteresting; print an opaque tag.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("ChunkerParams()")
    }
}
impl Default for ChunkerParams {
    /// Zeroed tables and a poly power of one; `new` fills in real values.
    fn default() -> Self {
        // Allocate both 256-entry tables up front and trim any excess
        // capacity so the allocations are exactly sized.
        let mut out_map = vec![0u64; 256];
        let mut ir = vec![0u64; 256];
        out_map.shrink_to_fit();
        ir.shrink_to_fit();
        ChunkerParams {
            poly_pow: 1,
            out_map,
            ir,
        }
    }
}
/// Content-defined chunker: consumes a byte stream through `Write`, splits
/// it into variable-sized chunks with a Rabin rolling hash, and writes each
/// complete chunk to the destination writer.
pub struct Chunker<W: Write + Seek> {
    dst: W,                // destination writer
    params: ChunkerParams, // chunker parameters
    pos: usize,       // current scan position inside `buf`
    chunk_len: usize, // bytes accumulated in the chunk currently being scanned
    buf_clen: usize,  // number of valid bytes currently held in `buf`
    win_idx: usize,   // next slot to overwrite in the circular window
    roll_hash: u64,   // current Rabin rolling hash value
    win: [u8; WIN_SIZE], // rolling hash circle window
    buf: Vec<u8>, // chunker buffer, fixed size: WTR_BUF_LEN
}
impl<W: Write + Seek> Chunker<W> {
    /// Builds a chunker around `dst` using the pre-computed `params`.
    pub fn new(params: ChunkerParams, dst: W) -> Self {
        // Fixed-size working buffer, trimmed to exact capacity.
        let mut work_buf = vec![0u8; WTR_BUF_LEN];
        work_buf.shrink_to_fit();
        Chunker {
            buf: work_buf,
            win: [0u8; WIN_SIZE],
            roll_hash: 0,
            win_idx: 0,
            buf_clen: 0,
            chunk_len: WIN_SLIDE_POS,
            pos: WIN_SLIDE_POS,
            params,
            dst,
        }
    }

    /// Flushes any buffered data and hands back the inner writer.
    pub fn into_inner(mut self) -> IoResult<W> {
        self.flush()?;
        Ok(self.dst)
    }
}
impl<W: Write + Seek> Write for Chunker<W> {
    // consume bytes stream, output chunks
    //
    // Standard `Write` partial-write semantics: only as many bytes as fit
    // in the internal buffer are consumed, and that count is returned.
    fn write(&mut self, buf: &[u8]) -> IoResult<usize> {
        if buf.is_empty() {
            return Ok(0);
        }
        // copy source data into chunker buffer
        let in_len = min(WTR_BUF_LEN - self.buf_clen, buf.len());
        assert!(in_len > 0);
        self.buf[self.buf_clen..self.buf_clen + in_len]
            .copy_from_slice(&buf[..in_len]);
        self.buf_clen += in_len;
        // Scan forward one byte at a time looking for cut points.
        while self.pos < self.buf_clen {
            // get current byte and pushed out byte
            let ch = self.buf[self.pos];
            let out = self.win[self.win_idx] as usize;
            let pushed_out = self.params.out_map[out];
            // calculate Rabin rolling hash
            self.roll_hash = (self.roll_hash * PRIME) & MASK;
            self.roll_hash += u64::from(ch);
            self.roll_hash = self.roll_hash.wrapping_sub(pushed_out) & MASK;
            // forward circle window
            self.win[self.win_idx] = ch;
            self.win_idx = (self.win_idx + 1) & WIN_MASK;
            self.chunk_len += 1;
            self.pos += 1;
            // No chunk is ever shorter than MIN_SIZE.
            if self.chunk_len >= MIN_SIZE {
                let chksum = self.roll_hash ^ self.params.ir[out];
                // reached cut point, chunk can be produced now
                if (chksum & CUT_MASK) == 0 || self.chunk_len >= MAX_SIZE {
                    // write the chunk to destination writer,
                    // ensure it is consumed in whole
                    let p = self.pos - self.chunk_len;
                    let written = self.dst.write(&self.buf[p..self.pos])?;
                    assert_eq!(written, self.chunk_len);
                    // not enough space in buffer, copy remaining to
                    // the head of buffer and reset buf position
                    if self.pos + MAX_SIZE >= WTR_BUF_LEN {
                        let left_len = self.buf_clen - self.pos;
                        unsafe {
                            // SAFETY: both ranges lie inside `buf` and
                            // `ptr::copy` permits overlapping regions.
                            ptr::copy::<u8>(
                                self.buf[self.pos..].as_ptr(),
                                self.buf.as_mut_ptr(),
                                left_len,
                            );
                        }
                        self.buf_clen = left_len;
                        self.pos = 0;
                    }
                    // jump to next start sliding position: no cut can occur
                    // before MIN_SIZE, so most of the next chunk's prefix is
                    // skipped outright (the window re-warms over the last
                    // WIN_SLIDE_OFFSET bytes of the skip).
                    self.pos += WIN_SLIDE_POS;
                    self.chunk_len = WIN_SLIDE_POS;
                }
            }
        }
        Ok(in_len)
    }

    // Emit whatever remains as a final (possibly short) chunk, reset the
    // chunker to its initial state, and flush the destination writer.
    fn flush(&mut self) -> IoResult<()> {
        // flush remaining data to destination
        let p = self.pos - self.chunk_len;
        if p < self.buf_clen {
            self.chunk_len = self.buf_clen - p;
            let _ = self.dst.write(&self.buf[p..(p + self.chunk_len)])?;
        }
        // reset chunker
        self.pos = WIN_SLIDE_POS;
        self.chunk_len = WIN_SLIDE_POS;
        self.buf_clen = 0;
        self.win_idx = 0;
        self.roll_hash = 0;
        self.win = [0u8; WIN_SIZE];
        self.dst.flush()
    }
}
impl<W: Write + Seek> Debug for Chunker<W> {
    // Internal state is omitted; an opaque tag is enough for diagnostics.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("Chunker()")
    }
}
impl<W: Write + Seek> Seek for Chunker<W> {
    // Seeking is delegated verbatim to the destination writer; the
    // chunker's own scanning state is not adjusted here.
    fn seek(&mut self, pos: SeekFrom) -> IoResult<u64> {
        self.dst.seek(pos)
    }
}
#[cfg(test)]
mod tests {
    use std::io::{copy, Cursor, Result as IoResult, Seek, SeekFrom, Write};
    use std::time::Instant;
    use super::*;
    use base::crypto::{Crypto, RandomSeed, RANDOM_SEED_SIZE};
    use base::init_env;
    use base::utils::speed_str;
    use content::chunk::Chunk;

    /// Sink that records every produced chunk so the resulting chunk
    /// layout can be verified on flush.
    #[derive(Debug)]
    struct Sinker {
        len: usize,
        chks: Vec<Chunk>,
    }

    impl Write for Sinker {
        fn write(&mut self, buf: &[u8]) -> IoResult<usize> {
            self.chks.push(Chunk::new(self.len, buf.len()));
            self.len += buf.len();
            Ok(buf.len())
        }

        fn flush(&mut self) -> IoResult<()> {
            // verify: chunk lengths must add up to the total byte count
            let sum = self.chks.iter().fold(0, |sum, t| sum + t.len);
            assert_eq!(sum, self.len);
            // verify: chunks must be contiguous. Checking adjacent pairs via
            // `windows(2)` also covers the final pair (the previous
            // `0..len - 2` bound skipped it) and cannot underflow when
            // fewer than two chunks were produced.
            for pair in self.chks.windows(2) {
                assert_eq!(pair[0].pos + pair[0].len, pair[1].pos);
            }
            Ok(())
        }
    }

    impl Seek for Sinker {
        fn seek(&mut self, _: SeekFrom) -> IoResult<u64> {
            Ok(0)
        }
    }

    /// Sink that discards everything; used for the performance test.
    #[derive(Debug)]
    struct VoidSinker {}

    impl Write for VoidSinker {
        fn write(&mut self, buf: &[u8]) -> IoResult<usize> {
            Ok(buf.len())
        }

        fn flush(&mut self) -> IoResult<()> {
            Ok(())
        }
    }

    impl Seek for VoidSinker {
        fn seek(&mut self, _: SeekFrom) -> IoResult<u64> {
            Ok(0)
        }
    }

    #[test]
    fn chunker() {
        init_env();
        // prepare random test data
        const DATA_LEN: usize = 765 * 1024;
        let params = ChunkerParams::new();
        let mut data = vec![0u8; DATA_LEN];
        Crypto::random_buf(&mut data);
        let mut cur = Cursor::new(data);
        let sinker = Sinker {
            len: 0,
            chks: Vec::new(),
        };

        // test chunker; flushing triggers the layout checks in Sinker
        let mut ckr = Chunker::new(params, sinker);
        let result = copy(&mut cur, &mut ckr);
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), DATA_LEN as u64);
        ckr.flush().unwrap();
    }

    #[test]
    fn chunker_perf() {
        init_env();
        // prepare deterministic test data so runs are comparable
        const DATA_LEN: usize = 10 * 1024 * 1024;
        let params = ChunkerParams::new();
        let mut data = vec![0u8; DATA_LEN];
        let seed = RandomSeed::from(&[0u8; RANDOM_SEED_SIZE]);
        Crypto::random_buf_deterministic(&mut data, &seed);
        let mut cur = Cursor::new(data);
        let sinker = VoidSinker {};

        // measure chunker throughput
        let mut ckr = Chunker::new(params, sinker);
        let now = Instant::now();
        copy(&mut cur, &mut ckr).unwrap();
        ckr.flush().unwrap();
        let time = now.elapsed();
        println!("Chunker perf: {}", speed_str(&time, DATA_LEN));
    }
}
| 29.492537 | 76 | 0.521053 |
c106ce37f1a7c692d5d5750d75f1a3b33e36b4d7 | 1,438 | //! Exchange records between workers.
use crate::ExchangeData;
use crate::dataflow::channels::pact::Exchange as ExchangePact;
use crate::dataflow::{Stream, Scope};
use crate::dataflow::operators::generic::operator::Operator;
/// Exchange records between workers.
pub trait Exchange<T, D: ExchangeData> {
    /// Exchange records between workers.
    ///
    /// The closure supplied should map a reference to a record to a `u64`,
    /// whose value determines to which worker the record will be routed.
    /// The routing value is handed to the exchange pact constructed by the
    /// implementation; records re-emerge on the selected worker with their
    /// original timestamps.
    ///
    /// # Examples
    /// ```
    /// use timely::dataflow::operators::{ToStream, Exchange, Inspect};
    ///
    /// timely::example(|scope| {
    ///     (0..10).to_stream(scope)
    ///            .exchange(|x| *x)
    ///            .inspect(|x| println!("seen: {:?}", x));
    /// });
    /// ```
    fn exchange(&self, route: impl Fn(&D)->u64+'static) -> Self;
}
// impl<T: Timestamp, G: Scope<Timestamp=T>, D: ExchangeData> Exchange<T, D> for Stream<G, D> {
impl<G: Scope, D: ExchangeData> Exchange<G::Timestamp, D> for Stream<G, D> {
    fn exchange(&self, route: impl Fn(&D)->u64+'static) -> Stream<G, D> {
        // Buffer reused across invocations: each incoming batch is swapped
        // into `vector` instead of allocating a fresh Vec per batch.
        let mut vector = Vec::new();
        self.unary(ExchangePact::new(route), "Exchange", move |_,_| move |input, output| {
            input.for_each(|time, data| {
                // Take ownership of the batch, then forward the records
                // downstream at the same timestamp.
                data.swap(&mut vector);
                output.session(&time).give_vec(&mut vector);
            });
        })
    }
}
| 35.95 | 95 | 0.582058 |
e26ed7c844380f686cd7b5cdd9fa406d19a5af6b | 6,984 | #![no_std]
#![no_main]
use super::state_code::StateCode;
/// Commands accepted over the device's 5-byte serial protocol.
///
/// Each variant corresponds to a three-byte command body; see
/// `Request::from_core_data` for the exact byte encodings.
#[repr(u16)]
#[derive(Copy, Clone)]
pub enum Request {
    SayHi,
    Init,
    Dispense(u8), // payload byte: count to dispense (0 is rejected)
    HaltAction,
    HaltActionCancel,
    RemoveCount,
    GetTotalDispensed,
    RemoveTotalCount,
    StateCheck,
    ErrorCheck,
}
/// Framing/validation failures for a received 5-byte packet.
pub enum Error {
    WrongCommand,   // valid frame, but the 3-byte command body is unknown
    WrongStart,     // first byte is not the b'$' start marker
    WrongHash,      // checksum byte does not match the payload
    WrongStartHash, // both the start marker and the checksum are wrong
    WrongUnknown,   // catch-all variant (not produced by `from_array` here)
}
impl Request {
const FAULT_LARGE_CAPITAL: [u8; 3] = [b'N', b'S', b'!'];
const FAULT_SMALL_CAPITAL: [u8; 3] = [b'b', b's', b'!'];
fn hash(array: &[u8; 5]) -> u8 {
array[1] + array[2] + array[3]
}
fn is_valid_hash(array: &[u8; 5]) -> bool {
Request::hash(array) == array[4]
}
fn from_core_data(core_data: (u8, u8, u8)) -> Result<Request, Error> {
match core_data {
(b'H', b'I', b'?') => Ok(Request::SayHi),
(b'I' | b'i', 0x00, 0x00) => Ok(Request::Init),
(b'D', _, b'S') | (b'd', _, b's') => Ok(Request::Dispense(core_data.1)),
(b'H' | b'h', 0x00, 0x00) => Ok(Request::HaltAction),
(b'H', b'C', b'?') | (b'h', b'c', b'?') => Ok(Request::HaltActionCancel),
(b'R', b'E', b'M') | (b'r', b'e', b'm') => Ok(Request::RemoveCount),
(b'G', b'T', b'?') | (b'g', b't', b'?') => Ok(Request::GetTotalDispensed),
(b'C', b'T', b'C') | (b'c', b't', b'c') => Ok(Request::RemoveTotalCount),
(b'S' | b's', 0x00, 0x00) => Ok(Request::StateCheck),
(b'S', b'E', b'R') | (b's', b'e', b'r') => Ok(Request::ErrorCheck),
_ => Err(Error::WrongCommand),
}
}
fn to_core_data(&self, capital: bool) -> [u8; 3] {
match self {
&Self::SayHi => caparr_TTd!(b'H', b'I', b'?', capital),
&Self::Init => caparr_Tdd!(b'I', 0x00, 0x00, capital),
&Self::Dispense(byte) => caparr_TdT!(b'D', byte, b'S', capital),
&Self::HaltAction => caparr_Tdd!(b'H', 0x00, 0x00, capital),
&Self::HaltActionCancel => caparr_TTd!(b'H', b'C', b'?', capital),
&Self::RemoveCount => caparr_TTT!(b'R', b'E', b'M', capital),
&Self::GetTotalDispensed => caparr_TTd!(b'G', b'T', b'?', capital),
&Self::RemoveTotalCount => caparr_TTT!(b'C', b'T', b'C', capital),
&Self::StateCheck => caparr_Tdd!(b's', 0x00, 0x00, capital),
&Self::ErrorCheck => caparr_TTT!(b'S', b'E', b'R', capital),
}
}
pub fn from_array(array: &[u8; 5]) -> Result<Request, Error> {
match (array[0], Request::is_valid_hash(array)) {
(b'$', true) => Request::from_core_data((array[1], array[2], array[3])),
(_, true) => Err(Error::WrongStart),
(b'$', false) => Err(Error::WrongHash),
_ => Err(Error::WrongStartHash),
}
}
pub fn action_possibility(&self, state: &StateCode) -> bool {
match (self) {
// - Say Hi -
// SayHi always echo action.
Request::SayHi => true,
// - Init -
Request::Init => true, // not sure about this.
// - Dispense -
Request::Dispense(0) => false, // Dispnese 0 paper is not allowed.
Request::Dispense(_) => match (state) {
// Dispense is not allowed while busy.
StateCode::WhileDispensing => false,
// When halted(inhibit mode) not allowed.
StateCode::ActionHalted => false,
// If there's problem, not allowed.
StateCode::ProblemDispense(_) => false,
_ => true,
},
// - HaltAction -
Request::HaltAction => match (state) {
// Halted on halt action now allowed (???)
StateCode::ActionHalted => false,
_ => true,
},
// - HaltActionCancel -
Request::HaltActionCancel => match (state) {
// I don't know
_ => true,
},
// - RemoveCount -
Request::RemoveCount => match (state) {
// WhileDispensing counting value is locked, thus not allowed.
StateCode::WhileDispensing => false,
_ => true,
},
// - GetTotalDispensed -
Request::GetTotalDispensed => match (state) {
// WhileDispensing counting value is locked, thus not allowed.
StateCode::WhileDispensing => false,
_ => true,
},
// - RemoveTotalCount -
Request::RemoveTotalCount => match (state) {
// WhileDispensing counting value is locked, thus not allowed.
StateCode::WhileDispensing => false,
_ => true,
},
// - StateCheck & ErrorCheck -
Request::StateCheck => true, // always allowed to read.
Request::ErrorCheck => true, // always allowed to read.
}
}
pub fn action_response(
&self,
state: &StateCode,
capital: bool,
extra: Option<u32>,
) -> (bool, Option<[u8; 3]>, Option<[u8; 3]>) {
let possible = self.action_possibility(state);
let core_data_1 = match (possible) {
false => match (capital) {
true => Request::FAULT_LARGE_CAPITAL,
false => Request::FAULT_SMALL_CAPITAL,
},
true => match (self) {
&Self::SayHi => caparr_TTd!(b'M', b'E', b'!', capital),
&Self::Init => caparr_Tdd!(b'I', 0x00, b'A', capital),
&Self::Dispense(byte) => caparr_TdT!(b'D', byte, b'A', capital),
&Self::HaltAction => caparr_Tdd!(b'H', 0x00, b'A', capital),
&Self::HaltActionCancel => caparr_TTd!(b'H', b'C', b'?', capital),
&Self::RemoveCount => caparr_TdT!(b'R', 0xFF, b'M', capital),
&Self::GetTotalDispensed => {
caparr_Tdd!(
b'T',
(extra.unwrap_or(0) >> 24) as u8,
(extra.unwrap_or(0) >> 16) as u8,
capital
)
}
&Self::RemoveTotalCount => caparr_TTT!(b'C', b'T', b'C', capital),
&Self::StateCheck => caparr_Tdd!(b's', 0x00, 0x00, capital),
&Self::ErrorCheck => caparr_TTT!(b'S', b'E', b'R', capital),
},
};
match (self) {
&Self::GetTotalDispensed => (
possible,
Some(core_data_1),
Some(caparr_Tdd!(
b'G',
(extra.unwrap_or(0) >> 8) as u8,
(extra.unwrap_or(0)) as u8,
capital
)),
),
_ => (possible, Some(core_data_1), None),
}
}
}
| 36.757895 | 86 | 0.47437 |
e4b4b180d1dede8dc377df47f9fdbd2c6912f2ba | 9,928 | #[doc = r"Enumeration of all the interrupts."]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(u16)]
// NOTE(review): discriminant 30 is absent (COMPARATOR_3 = 29 jumps to
// SPI_1 = 31) — presumably a reserved vector; confirm against the device
// datasheet. `try_from` below mirrors the same gap.
pub enum Interrupt {
    #[doc = "0 - CORE_TIMER"]
    CORE_TIMER = 0,
    #[doc = "1 - CORE_SOFTWARE_0"]
    CORE_SOFTWARE_0 = 1,
    #[doc = "2 - CORE_SOFTWARE_1"]
    CORE_SOFTWARE_1 = 2,
    #[doc = "3 - EXTERNAL_0"]
    EXTERNAL_0 = 3,
    #[doc = "4 - TIMER_1"]
    TIMER_1 = 4,
    #[doc = "5 - INPUT_CAPTURE_1"]
    INPUT_CAPTURE_1 = 5,
    #[doc = "6 - OUTPUT_COMPARE_1"]
    OUTPUT_COMPARE_1 = 6,
    #[doc = "7 - EXTERNAL_1"]
    EXTERNAL_1 = 7,
    #[doc = "8 - TIMER_2"]
    TIMER_2 = 8,
    #[doc = "9 - INPUT_CAPTURE_2"]
    INPUT_CAPTURE_2 = 9,
    #[doc = "10 - OUTPUT_COMPARE_2"]
    OUTPUT_COMPARE_2 = 10,
    #[doc = "11 - EXTERNAL_2"]
    EXTERNAL_2 = 11,
    #[doc = "12 - TIMER_3"]
    TIMER_3 = 12,
    #[doc = "13 - INPUT_CAPTURE_3"]
    INPUT_CAPTURE_3 = 13,
    #[doc = "14 - OUTPUT_COMPARE_3"]
    OUTPUT_COMPARE_3 = 14,
    #[doc = "15 - EXTERNAL_3"]
    EXTERNAL_3 = 15,
    #[doc = "16 - TIMER_4"]
    TIMER_4 = 16,
    #[doc = "17 - INPUT_CAPTURE_4"]
    INPUT_CAPTURE_4 = 17,
    #[doc = "18 - OUTPUT_COMPARE_4"]
    OUTPUT_COMPARE_4 = 18,
    #[doc = "19 - EXTERNAL_4"]
    EXTERNAL_4 = 19,
    #[doc = "20 - TIMER_5"]
    TIMER_5 = 20,
    #[doc = "21 - INPUT_CAPTURE_5"]
    INPUT_CAPTURE_5 = 21,
    #[doc = "22 - OUTPUT_COMPARE_5"]
    OUTPUT_COMPARE_5 = 22,
    #[doc = "23 - ADC"]
    ADC = 23,
    #[doc = "24 - FAIL_SAFE_MONITOR"]
    FAIL_SAFE_MONITOR = 24,
    #[doc = "25 - RTCC"]
    RTCC = 25,
    #[doc = "26 - FCE"]
    FCE = 26,
    #[doc = "27 - COMPARATOR_1"]
    COMPARATOR_1 = 27,
    #[doc = "28 - COMPARATOR_2"]
    COMPARATOR_2 = 28,
    #[doc = "29 - COMPARATOR_3"]
    COMPARATOR_3 = 29,
    #[doc = "31 - SPI_1"]
    SPI_1 = 31,
    #[doc = "32 - UART_1"]
    UART_1 = 32,
    #[doc = "33 - I2C_1"]
    I2C_1 = 33,
    #[doc = "34 - CHANGE_NOTICE"]
    CHANGE_NOTICE = 34,
    #[doc = "35 - PMP"]
    PMP = 35,
    #[doc = "36 - SPI_2"]
    SPI_2 = 36,
    #[doc = "37 - UART_2"]
    UART_2 = 37,
    #[doc = "38 - I2C_2"]
    I2C_2 = 38,
    #[doc = "39 - CTMU"]
    CTMU = 39,
    #[doc = "40 - DMA_0"]
    DMA_0 = 40,
    #[doc = "41 - DMA_1"]
    DMA_1 = 41,
    #[doc = "42 - DMA_2"]
    DMA_2 = 42,
    #[doc = "43 - DMA_3"]
    DMA_3 = 43,
}
/// Error returned by [`Interrupt::try_from`] when the raw value does not
/// name a known interrupt.
#[derive(Debug, Copy, Clone)]
pub struct TryFromInterruptError(());
impl Interrupt {
    /// Maps a raw interrupt number to its [`Interrupt`] variant.
    ///
    /// # Errors
    /// Returns [`TryFromInterruptError`] for value 30 (no interrupt is
    /// defined at that position) and for any value above 43.
    #[inline]
    pub fn try_from(value: u8) -> Result<Self, TryFromInterruptError> {
        match value {
            0 => Ok(Interrupt::CORE_TIMER),
            1 => Ok(Interrupt::CORE_SOFTWARE_0),
            2 => Ok(Interrupt::CORE_SOFTWARE_1),
            3 => Ok(Interrupt::EXTERNAL_0),
            4 => Ok(Interrupt::TIMER_1),
            5 => Ok(Interrupt::INPUT_CAPTURE_1),
            6 => Ok(Interrupt::OUTPUT_COMPARE_1),
            7 => Ok(Interrupt::EXTERNAL_1),
            8 => Ok(Interrupt::TIMER_2),
            9 => Ok(Interrupt::INPUT_CAPTURE_2),
            10 => Ok(Interrupt::OUTPUT_COMPARE_2),
            11 => Ok(Interrupt::EXTERNAL_2),
            12 => Ok(Interrupt::TIMER_3),
            13 => Ok(Interrupt::INPUT_CAPTURE_3),
            14 => Ok(Interrupt::OUTPUT_COMPARE_3),
            15 => Ok(Interrupt::EXTERNAL_3),
            16 => Ok(Interrupt::TIMER_4),
            17 => Ok(Interrupt::INPUT_CAPTURE_4),
            18 => Ok(Interrupt::OUTPUT_COMPARE_4),
            19 => Ok(Interrupt::EXTERNAL_4),
            20 => Ok(Interrupt::TIMER_5),
            21 => Ok(Interrupt::INPUT_CAPTURE_5),
            22 => Ok(Interrupt::OUTPUT_COMPARE_5),
            23 => Ok(Interrupt::ADC),
            24 => Ok(Interrupt::FAIL_SAFE_MONITOR),
            25 => Ok(Interrupt::RTCC),
            26 => Ok(Interrupt::FCE),
            27 => Ok(Interrupt::COMPARATOR_1),
            28 => Ok(Interrupt::COMPARATOR_2),
            29 => Ok(Interrupt::COMPARATOR_3),
            // 30 is deliberately unmapped (no variant exists for it).
            31 => Ok(Interrupt::SPI_1),
            32 => Ok(Interrupt::UART_1),
            33 => Ok(Interrupt::I2C_1),
            34 => Ok(Interrupt::CHANGE_NOTICE),
            35 => Ok(Interrupt::PMP),
            36 => Ok(Interrupt::SPI_2),
            37 => Ok(Interrupt::UART_2),
            38 => Ok(Interrupt::I2C_2),
            39 => Ok(Interrupt::CTMU),
            40 => Ok(Interrupt::DMA_0),
            41 => Ok(Interrupt::DMA_1),
            42 => Ok(Interrupt::DMA_2),
            43 => Ok(Interrupt::DMA_3),
            _ => Err(TryFromInterruptError(())),
        }
    }
}
// The macro below expands to a `#[no_mangle] pub extern "C"` function named
// after the interrupt, optionally with a `Locals` struct of static
// interrupt-local state passed to the handler by `&mut` reference.
#[cfg(feature = "rt")]
#[macro_export]
#[doc = r" Assigns a handler to an interrupt"]
#[doc = r""]
#[doc = r" This macro takes two arguments: the name of an interrupt and the path to the"]
#[doc = r" function that will be used as the handler of that interrupt. That function"]
#[doc = r" must have signature `fn()`."]
#[doc = r""]
#[doc = r" Optionally, a third argument may be used to declare interrupt local data."]
#[doc = r" The handler will have exclusive access to these *local* variables on each"]
#[doc = r" invocation. If the third argument is used then the signature of the handler"]
#[doc = r" function must be `fn(&mut $NAME::Locals)` where `$NAME` is the first argument"]
#[doc = r" passed to the macro."]
#[doc = r""]
#[doc = r" # Example"]
#[doc = r""]
#[doc = r" ``` ignore"]
#[doc = r" interrupt!(TIM2, periodic);"]
#[doc = r""]
#[doc = r" fn periodic() {"]
#[doc = r#"     print!(".");"#]
#[doc = r" }"]
#[doc = r""]
#[doc = r" interrupt!(TIM3, tick, locals: {"]
#[doc = r"     tick: bool = false;"]
#[doc = r" });"]
#[doc = r""]
#[doc = r" fn tick(locals: &mut TIM3::Locals) {"]
#[doc = r"     locals.tick = !locals.tick;"]
#[doc = r""]
#[doc = r"     if locals.tick {"]
#[doc = r#"         println!("Tick");"#]
#[doc = r" } else {"]
#[doc = r#"         println!("Tock");"#]
#[doc = r" }"]
#[doc = r" }"]
#[doc = r" ```"]
macro_rules ! interrupt { ( $ NAME : ident , $ path : path , locals : { $ ( $ lvar : ident : $ lty : ty = $ lval : expr ; ) * } ) => { # [ allow ( non_snake_case ) ] mod $ NAME { pub struct Locals { $ ( pub $ lvar : $ lty , ) * } } # [ allow ( non_snake_case ) ] # [ no_mangle ] pub extern "C" fn $ NAME ( ) { let _ = $ crate :: interrupt :: Interrupt :: $ NAME ; static mut LOCALS : self :: $ NAME :: Locals = self :: $ NAME :: Locals { $ ( $ lvar : $ lval , ) * } ; let f : fn ( & mut self :: $ NAME :: Locals ) = $ path ; f ( unsafe { & mut LOCALS } ) ; } } ; ( $ NAME : ident , $ path : path ) => { # [ allow ( non_snake_case ) ] # [ no_mangle ] pub extern "C" fn $ NAME ( ) { let _ = $ crate :: interrupt :: Interrupt :: $ NAME ; let f : fn ( ) = $ path ; f ( ) ; } } }
#[doc = r"Enumeration of all the interrupt sources"]
#[derive(Copy, Clone, Debug)]
#[repr(u8)]
pub enum InterruptSource {
#[doc = "Core Timer"]
CORE_TIMER = 0,
#[doc = "Core Software 0"]
CORE_SOFTWARE_0 = 1,
#[doc = "Core Software 1"]
CORE_SOFTWARE_1 = 2,
#[doc = "Ext INT 0"]
EXTERNAL_0 = 3,
#[doc = "Timer 1"]
TIMER_1 = 4,
#[doc = "Input Capature 1 Error"]
INPUT_CAPTURE_ERROR_1 = 5,
#[doc = "Input Capture 1"]
INPUT_CAPTURE_1 = 6,
#[doc = "Output Compare 1"]
OUTPUT_COMPARE_1 = 7,
#[doc = "Ext INT 1"]
EXTERNAL_1 = 8,
#[doc = "Timer 2"]
TIMER_2 = 9,
#[doc = "Input Capture 2 Error"]
INPUT_CAPTURE_ERROR_2 = 10,
#[doc = "Input Capture 2"]
INPUT_CAPTURE_2 = 11,
#[doc = "Output Compare 2"]
OUTPUT_COMPARE_2 = 12,
#[doc = "External INT 2"]
EXTERNAL_2 = 13,
#[doc = "Timer 3"]
TIMER_3 = 14,
#[doc = "Input Capture 3 Error"]
INPUT_CAPTURE_ERROR_3 = 15,
#[doc = "Input Capture 3"]
INPUT_CAPTURE_3 = 16,
#[doc = "Output Compare 3"]
OUTPUT_COMPARE_3 = 17,
#[doc = "Ext INT 3"]
EXTERNAL_3 = 18,
#[doc = "Timer 4"]
TIMER_4 = 19,
#[doc = "Input Capture 4 Error"]
INPUT_CAPTURE_ERROR_4 = 20,
#[doc = "Input Capture 4"]
INPUT_CAPTURE_4 = 21,
#[doc = "Output Compare 4"]
OUTPUT_COMPARE_4 = 22,
#[doc = "Ext INT 4"]
EXTERNAL_4 = 23,
#[doc = "Timer 5"]
TIMER_5 = 24,
#[doc = "Input Capture 5 Error"]
INPUT_CAPTURE_ERROR_5 = 25,
#[doc = "Input Capture 5"]
INPUT_CAPTURE_5 = 26,
#[doc = "Output Compare 5"]
OUTPUT_COMPARE_5 = 27,
#[doc = "ADC1 Convert Done"]
ADC = 28,
#[doc = "Fail Safe Clock Monitor"]
FAIL_SAFE_MONITOR = 29,
#[doc = "Real Time Clock Calendar"]
RTCC = 30,
FLASH_CONTROL = 31,
#[doc = "Comparator 1"]
COMPARATOR_1 = 32,
#[doc = "Comparator 2"]
COMPARATOR_2 = 33,
#[doc = "Comparator 3"]
COMPARATOR_3 = 34,
#[doc = "USB"]
USB = 35,
#[doc = "SPI1 Fault"]
SPI1_ERR = 36,
#[doc = "SPI1 Receive Done"]
SPI1_RX = 37,
#[doc = "SPI1 Transmit Done"]
SPI1_TX = 38,
#[doc = "UART1 Error"]
UART1_ERR = 39,
#[doc = "UART1 Receiver"]
UART1_RX = 40,
#[doc = "UART1 Trasmitter"]
UART1_TX = 41,
#[doc = "I2C1 Bus Collision Event"]
I2C1_BUS = 42,
#[doc = "I2C1 Slave Event"]
I2C1_SLAVE = 43,
#[doc = "I2C1 Master Event"]
I2C1_MASTER = 44,
#[doc = "Part A Change Notice"]
CHANGE_NOTICE_A = 45,
#[doc = "Part B Change Notice"]
CHANGE_NOTICE_B = 46,
#[doc = "Part C Change Notice"]
CHANGE_NOTICE_C = 47,
#[doc = "Parallel Master Port"]
PMP = 48,
#[doc = "PMP Error"]
PMP_ERROR = 49,
#[doc = "SPI2 Error"]
SPI2_ERR = 50,
#[doc = "SPI2 Receiver"]
SPI2_RX = 51,
#[doc = "SPI2 Trasmitter"]
SPI2_TX = 52,
#[doc = "UART2 Error"]
UART2_ERR = 53,
#[doc = "UART2 Receiver"]
UART2_RX = 54,
#[doc = "UART2 Trasmitter"]
UART2_TX = 55,
#[doc = "I2C2 Bus Collision Event"]
I2C2_BUS = 56,
#[doc = "I2C2 Slave Event"]
I2C2_SLAVE = 57,
#[doc = "I2C2 Master Event"]
I2C2_MASTER = 58,
#[doc = "CTMU Event"]
CTMU = 59,
DMA0 = 60,
DMA1 = 61,
DMA2 = 62,
DMA3 = 63,
}
| 31.820513 | 774 | 0.533038 |
fef07e76072c2e754f512558848b27f29b7fe075 | 22,445 | #[cfg(not(target_arch = "bpf"))]
use crate::{
address_lookup_table_account::AddressLookupTableAccount,
message::v0::{LoadedAddresses, MessageAddressTableLookup},
};
use {
crate::{instruction::Instruction, message::MessageHeader, pubkey::Pubkey},
std::collections::BTreeMap,
thiserror::Error,
};
/// A helper struct to collect pubkeys compiled for a set of instructions
#[derive(Default, Debug, Clone, PartialEq, Eq)]
pub(crate) struct CompiledKeys {
    payer: Option<Pubkey>, // fee payer, if given; always treated as a writable signer
    key_meta_map: BTreeMap<Pubkey, CompiledKeyMeta>, // per-key signer/writable/invoked flags
}
/// Errors that can occur while compiling instruction keys into message
/// components.
#[cfg_attr(target_arch = "bpf", allow(dead_code))]
#[derive(PartialEq, Debug, Error, Eq, Clone)]
pub enum CompileError {
    #[error("account index overflowed during compilation")]
    AccountIndexOverflow,
    #[error("address lookup table index overflowed during compilation")]
    AddressTableLookupIndexOverflow,
    #[error("encountered unknown account key `{0}` during instruction compilation")]
    UnknownInstructionKey(Pubkey),
}
/// Flags accumulated for a single key across all compiled instructions.
#[derive(Default, Debug, Clone, PartialEq, Eq)]
struct CompiledKeyMeta {
    is_signer: bool,   // referenced as a signer by some instruction (or is the payer)
    is_writable: bool, // referenced as writable by some instruction (or is the payer)
    is_invoked: bool,  // used as a program id by some instruction
}
impl CompiledKeys {
    /// Compiles the pubkeys referenced by a list of instructions and organizes by
    /// signer/non-signer and writable/readonly.
    pub(crate) fn compile(instructions: &[Instruction], payer: Option<Pubkey>) -> Self {
        let mut key_meta_map = BTreeMap::<Pubkey, CompiledKeyMeta>::new();
        for ix in instructions {
            // Program ids are tracked as "invoked" so they are never moved
            // into an address table lookup (see try_extract_table_lookup).
            let mut meta = key_meta_map.entry(ix.program_id).or_default();
            meta.is_invoked = true;
            for account_meta in &ix.accounts {
                // Flags are OR-ed: a key referenced by several instructions
                // keeps the strongest privileges requested for it.
                let meta = key_meta_map.entry(account_meta.pubkey).or_default();
                meta.is_signer |= account_meta.is_signer;
                meta.is_writable |= account_meta.is_writable;
            }
        }
        if let Some(payer) = &payer {
            // The payer is always compiled as a writable signer.
            let mut meta = key_meta_map.entry(*payer).or_default();
            meta.is_signer = true;
            meta.is_writable = true;
        }
        Self {
            payer,
            key_meta_map,
        }
    }

    /// Consumes the compiled keys and produces the message header plus the
    /// static account key list, ordered: payer, writable signers, readonly
    /// signers, writable non-signers, readonly non-signers.
    ///
    /// Returns `CompileError::AccountIndexOverflow` if any header count
    /// does not fit in a `u8`.
    pub(crate) fn try_into_message_components(
        self,
    ) -> Result<(MessageHeader, Vec<Pubkey>), CompileError> {
        let try_into_u8 = |num: usize| -> Result<u8, CompileError> {
            u8::try_from(num).map_err(|_| CompileError::AccountIndexOverflow)
        };
        let Self {
            payer,
            mut key_meta_map,
        } = self;
        // Remove the payer from the map so it can be placed first explicitly.
        if let Some(payer) = &payer {
            key_meta_map.remove_entry(payer);
        }
        let writable_signer_keys: Vec<Pubkey> = payer
            .into_iter()
            .chain(
                key_meta_map
                    .iter()
                    .filter_map(|(key, meta)| (meta.is_signer && meta.is_writable).then(|| *key)),
            )
            .collect();
        let readonly_signer_keys: Vec<Pubkey> = key_meta_map
            .iter()
            .filter_map(|(key, meta)| (meta.is_signer && !meta.is_writable).then(|| *key))
            .collect();
        let writable_non_signer_keys: Vec<Pubkey> = key_meta_map
            .iter()
            .filter_map(|(key, meta)| (!meta.is_signer && meta.is_writable).then(|| *key))
            .collect();
        let readonly_non_signer_keys: Vec<Pubkey> = key_meta_map
            .iter()
            .filter_map(|(key, meta)| (!meta.is_signer && !meta.is_writable).then(|| *key))
            .collect();
        let signers_len = writable_signer_keys
            .len()
            .saturating_add(readonly_signer_keys.len());
        let header = MessageHeader {
            num_required_signatures: try_into_u8(signers_len)?,
            num_readonly_signed_accounts: try_into_u8(readonly_signer_keys.len())?,
            num_readonly_unsigned_accounts: try_into_u8(readonly_non_signer_keys.len())?,
        };
        let static_account_keys = std::iter::empty()
            .chain(writable_signer_keys)
            .chain(readonly_signer_keys)
            .chain(writable_non_signer_keys)
            .chain(readonly_non_signer_keys)
            .collect();
        Ok((header, static_account_keys))
    }

    /// Attempts to move eligible keys (non-signer and not used as a program
    /// id) out of the static key set and into a lookup for the given address
    /// lookup table. Returns `Ok(None)` if the table covers none of them.
    #[cfg(not(target_arch = "bpf"))]
    pub(crate) fn try_extract_table_lookup(
        &mut self,
        lookup_table_account: &AddressLookupTableAccount,
    ) -> Result<Option<(MessageAddressTableLookup, LoadedAddresses)>, CompileError> {
        let (writable_indexes, drained_writable_keys) = self
            .try_drain_keys_found_in_lookup_table(&lookup_table_account.addresses, |meta| {
                !meta.is_signer && !meta.is_invoked && meta.is_writable
            })?;
        let (readonly_indexes, drained_readonly_keys) = self
            .try_drain_keys_found_in_lookup_table(&lookup_table_account.addresses, |meta| {
                !meta.is_signer && !meta.is_invoked && !meta.is_writable
            })?;
        // Don't extract lookup if no keys were found
        if writable_indexes.is_empty() && readonly_indexes.is_empty() {
            return Ok(None);
        }
        Ok(Some((
            MessageAddressTableLookup {
                account_key: lookup_table_account.key,
                writable_indexes,
                readonly_indexes,
            },
            LoadedAddresses {
                writable: drained_writable_keys,
                readonly: drained_readonly_keys,
            },
        )))
    }

    /// Drains keys matching `key_meta_filter` that also appear in
    /// `lookup_table_addresses`, returning their table indexes alongside the
    /// drained keys (in matching order). Errors with
    /// `AddressTableLookupIndexOverflow` if a match sits at a table index
    /// that does not fit in a `u8`.
    #[cfg(not(target_arch = "bpf"))]
    fn try_drain_keys_found_in_lookup_table(
        &mut self,
        lookup_table_addresses: &[Pubkey],
        key_meta_filter: impl Fn(&CompiledKeyMeta) -> bool,
    ) -> Result<(Vec<u8>, Vec<Pubkey>), CompileError> {
        let mut lookup_table_indexes = Vec::new();
        let mut drained_keys = Vec::new();
        for search_key in self
            .key_meta_map
            .iter()
            .filter_map(|(key, meta)| key_meta_filter(meta).then(|| key))
        {
            // First table occurrence wins for each key.
            for (key_index, key) in lookup_table_addresses.iter().enumerate() {
                if key == search_key {
                    let lookup_table_index = u8::try_from(key_index)
                        .map_err(|_| CompileError::AddressTableLookupIndexOverflow)?;
                    lookup_table_indexes.push(lookup_table_index);
                    drained_keys.push(*search_key);
                    break;
                }
            }
        }
        // Remove the drained keys from the compiled map.
        for key in &drained_keys {
            self.key_meta_map.remove_entry(key);
        }
        Ok((lookup_table_indexes, drained_keys))
    }
}
#[cfg(test)]
mod tests {
use {super::*, crate::instruction::AccountMeta, bitflags::bitflags};
bitflags! {
pub struct KeyFlags: u8 {
const SIGNER = 0b00000001;
const WRITABLE = 0b00000010;
const INVOKED = 0b00000100;
}
}
impl From<KeyFlags> for CompiledKeyMeta {
fn from(flags: KeyFlags) -> Self {
Self {
is_signer: flags.contains(KeyFlags::SIGNER),
is_writable: flags.contains(KeyFlags::WRITABLE),
is_invoked: flags.contains(KeyFlags::INVOKED),
}
}
}
#[test]
fn test_compile_with_dups() {
let program_id0 = Pubkey::new_unique();
let program_id1 = Pubkey::new_unique();
let program_id2 = Pubkey::new_unique();
let program_id3 = Pubkey::new_unique();
let id0 = Pubkey::new_unique();
let id1 = Pubkey::new_unique();
let id2 = Pubkey::new_unique();
let id3 = Pubkey::new_unique();
let compiled_keys = CompiledKeys::compile(
&[
Instruction::new_with_bincode(
program_id0,
&0,
vec![
AccountMeta::new_readonly(id0, false),
AccountMeta::new_readonly(id1, true),
AccountMeta::new(id2, false),
AccountMeta::new(id3, true),
// duplicate the account inputs
AccountMeta::new_readonly(id0, false),
AccountMeta::new_readonly(id1, true),
AccountMeta::new(id2, false),
AccountMeta::new(id3, true),
// reference program ids
AccountMeta::new_readonly(program_id0, false),
AccountMeta::new_readonly(program_id1, true),
AccountMeta::new(program_id2, false),
AccountMeta::new(program_id3, true),
],
),
Instruction::new_with_bincode(program_id1, &0, vec![]),
Instruction::new_with_bincode(program_id2, &0, vec![]),
Instruction::new_with_bincode(program_id3, &0, vec![]),
],
None,
);
assert_eq!(
compiled_keys,
CompiledKeys {
payer: None,
key_meta_map: BTreeMap::from([
(id0, KeyFlags::empty().into()),
(id1, KeyFlags::SIGNER.into()),
(id2, KeyFlags::WRITABLE.into()),
(id3, (KeyFlags::SIGNER | KeyFlags::WRITABLE).into()),
(program_id0, KeyFlags::INVOKED.into()),
(program_id1, (KeyFlags::INVOKED | KeyFlags::SIGNER).into()),
(program_id2, (KeyFlags::INVOKED | KeyFlags::WRITABLE).into()),
(program_id3, KeyFlags::all().into()),
]),
}
);
}
#[test]
fn test_compile_with_dup_payer() {
let program_id = Pubkey::new_unique();
let payer = Pubkey::new_unique();
let compiled_keys = CompiledKeys::compile(
&[Instruction::new_with_bincode(
program_id,
&0,
vec![AccountMeta::new_readonly(payer, false)],
)],
Some(payer),
);
assert_eq!(
compiled_keys,
CompiledKeys {
payer: Some(payer),
key_meta_map: BTreeMap::from([
(payer, (KeyFlags::SIGNER | KeyFlags::WRITABLE).into()),
(program_id, KeyFlags::INVOKED.into()),
]),
}
);
}
#[test]
fn test_compile_with_dup_signer_mismatch() {
let program_id = Pubkey::new_unique();
let id0 = Pubkey::new_unique();
let compiled_keys = CompiledKeys::compile(
&[Instruction::new_with_bincode(
program_id,
&0,
vec![AccountMeta::new(id0, false), AccountMeta::new(id0, true)],
)],
None,
);
// Ensure the dup writable key is a signer
assert_eq!(
compiled_keys,
CompiledKeys {
payer: None,
key_meta_map: BTreeMap::from([
(id0, (KeyFlags::SIGNER | KeyFlags::WRITABLE).into()),
(program_id, KeyFlags::INVOKED.into()),
]),
}
);
}
#[test]
fn test_compile_with_dup_signer_writable_mismatch() {
let program_id = Pubkey::new_unique();
let id0 = Pubkey::new_unique();
let compiled_keys = CompiledKeys::compile(
&[Instruction::new_with_bincode(
program_id,
&0,
vec![
AccountMeta::new_readonly(id0, true),
AccountMeta::new(id0, true),
],
)],
None,
);
// Ensure the dup signer key is writable
assert_eq!(
compiled_keys,
CompiledKeys {
payer: None,
key_meta_map: BTreeMap::from([
(id0, (KeyFlags::SIGNER | KeyFlags::WRITABLE).into()),
(program_id, KeyFlags::INVOKED.into()),
]),
}
);
}
#[test]
fn test_compile_with_dup_nonsigner_writable_mismatch() {
let program_id = Pubkey::new_unique();
let id0 = Pubkey::new_unique();
let compiled_keys = CompiledKeys::compile(
&[
Instruction::new_with_bincode(
program_id,
&0,
vec![
AccountMeta::new_readonly(id0, false),
AccountMeta::new(id0, false),
],
),
Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, false)]),
],
None,
);
// Ensure the dup nonsigner key is writable
assert_eq!(
compiled_keys,
CompiledKeys {
payer: None,
key_meta_map: BTreeMap::from([
(id0, KeyFlags::WRITABLE.into()),
(program_id, KeyFlags::INVOKED.into()),
]),
}
);
}
#[test]
fn test_try_into_message_components() {
let keys = vec![
Pubkey::new_unique(),
Pubkey::new_unique(),
Pubkey::new_unique(),
Pubkey::new_unique(),
];
let compiled_keys = CompiledKeys {
payer: None,
key_meta_map: BTreeMap::from([
(keys[0], (KeyFlags::SIGNER | KeyFlags::WRITABLE).into()),
(keys[1], KeyFlags::SIGNER.into()),
(keys[2], KeyFlags::WRITABLE.into()),
(keys[3], KeyFlags::empty().into()),
]),
};
let result = compiled_keys.try_into_message_components();
assert_eq!(result.as_ref().err(), None);
let (header, static_keys) = result.unwrap();
assert_eq!(static_keys, keys);
assert_eq!(
header,
MessageHeader {
num_required_signatures: 2,
num_readonly_signed_accounts: 1,
num_readonly_unsigned_accounts: 1,
}
);
}
#[test]
fn test_try_into_message_components_with_too_many_keys() {
const TOO_MANY_KEYS: usize = 257;
for key_flags in [
KeyFlags::WRITABLE | KeyFlags::SIGNER,
KeyFlags::SIGNER,
// skip writable_non_signer_keys because it isn't used for creating header values
KeyFlags::empty(),
] {
let test_keys = CompiledKeys {
payer: None,
key_meta_map: BTreeMap::from_iter(
(0..TOO_MANY_KEYS).map(|_| (Pubkey::new_unique(), key_flags.into())),
),
};
assert_eq!(
test_keys.try_into_message_components(),
Err(CompileError::AccountIndexOverflow)
);
}
}
#[test]
fn test_try_extract_table_lookup() {
let keys = vec![
Pubkey::new_unique(),
Pubkey::new_unique(),
Pubkey::new_unique(),
Pubkey::new_unique(),
Pubkey::new_unique(),
Pubkey::new_unique(),
];
let mut compiled_keys = CompiledKeys {
payer: None,
key_meta_map: BTreeMap::from([
(keys[0], (KeyFlags::SIGNER | KeyFlags::WRITABLE).into()),
(keys[1], KeyFlags::SIGNER.into()),
(keys[2], KeyFlags::WRITABLE.into()),
(keys[3], KeyFlags::empty().into()),
(keys[4], (KeyFlags::INVOKED | KeyFlags::WRITABLE).into()),
(keys[5], (KeyFlags::INVOKED).into()),
]),
};
// add some duplicates to ensure lowest index is selected
let addresses = [keys.clone(), keys.clone()].concat();
let lookup_table_account = AddressLookupTableAccount {
key: Pubkey::new_unique(),
addresses,
};
assert_eq!(
compiled_keys.try_extract_table_lookup(&lookup_table_account),
Ok(Some((
MessageAddressTableLookup {
account_key: lookup_table_account.key,
writable_indexes: vec![2],
readonly_indexes: vec![3],
},
LoadedAddresses {
writable: vec![keys[2]],
readonly: vec![keys[3]],
},
)))
);
assert_eq!(compiled_keys.key_meta_map.len(), 4);
assert!(!compiled_keys.key_meta_map.contains_key(&keys[2]));
assert!(!compiled_keys.key_meta_map.contains_key(&keys[3]));
}
#[test]
fn test_try_extract_table_lookup_returns_none() {
let mut compiled_keys = CompiledKeys {
payer: None,
key_meta_map: BTreeMap::from([
(Pubkey::new_unique(), KeyFlags::WRITABLE.into()),
(Pubkey::new_unique(), KeyFlags::empty().into()),
]),
};
let lookup_table_account = AddressLookupTableAccount {
key: Pubkey::new_unique(),
addresses: vec![],
};
let expected_compiled_keys = compiled_keys.clone();
assert_eq!(
compiled_keys.try_extract_table_lookup(&lookup_table_account),
Ok(None)
);
assert_eq!(compiled_keys, expected_compiled_keys);
}
#[test]
fn test_try_extract_table_lookup_for_invalid_table() {
let writable_key = Pubkey::new_unique();
let mut compiled_keys = CompiledKeys {
payer: None,
key_meta_map: BTreeMap::from([
(writable_key, KeyFlags::WRITABLE.into()),
(Pubkey::new_unique(), KeyFlags::empty().into()),
]),
};
const MAX_LENGTH_WITHOUT_OVERFLOW: usize = u8::MAX as usize + 1;
let mut addresses = vec![Pubkey::default(); MAX_LENGTH_WITHOUT_OVERFLOW];
addresses.push(writable_key);
let lookup_table_account = AddressLookupTableAccount {
key: Pubkey::new_unique(),
addresses,
};
let expected_compiled_keys = compiled_keys.clone();
assert_eq!(
compiled_keys.try_extract_table_lookup(&lookup_table_account),
Err(CompileError::AddressTableLookupIndexOverflow),
);
assert_eq!(compiled_keys, expected_compiled_keys);
}
#[test]
fn test_try_drain_keys_found_in_lookup_table() {
let orig_keys = vec![
Pubkey::new_unique(),
Pubkey::new_unique(),
Pubkey::new_unique(),
Pubkey::new_unique(),
Pubkey::new_unique(),
];
let mut compiled_keys = CompiledKeys {
payer: None,
key_meta_map: BTreeMap::from([
(orig_keys[0], KeyFlags::empty().into()),
(orig_keys[1], KeyFlags::WRITABLE.into()),
(orig_keys[2], KeyFlags::WRITABLE.into()),
(orig_keys[3], KeyFlags::empty().into()),
(orig_keys[4], KeyFlags::empty().into()),
]),
};
let lookup_table_addresses = vec![
Pubkey::new_unique(),
orig_keys[0],
Pubkey::new_unique(),
orig_keys[4],
Pubkey::new_unique(),
orig_keys[2],
Pubkey::new_unique(),
];
let drain_result = compiled_keys
.try_drain_keys_found_in_lookup_table(&lookup_table_addresses, |meta| {
!meta.is_writable
});
assert_eq!(drain_result.as_ref().err(), None);
let (lookup_table_indexes, drained_keys) = drain_result.unwrap();
assert_eq!(
compiled_keys.key_meta_map.keys().collect::<Vec<&_>>(),
vec![&orig_keys[1], &orig_keys[2], &orig_keys[3]]
);
assert_eq!(drained_keys, vec![orig_keys[0], orig_keys[4]]);
assert_eq!(lookup_table_indexes, vec![1, 3]);
}
#[test]
fn test_try_drain_keys_found_in_lookup_table_with_empty_keys() {
let mut compiled_keys = CompiledKeys::default();
let lookup_table_addresses = vec![
Pubkey::new_unique(),
Pubkey::new_unique(),
Pubkey::new_unique(),
];
let drain_result =
compiled_keys.try_drain_keys_found_in_lookup_table(&lookup_table_addresses, |_| true);
assert_eq!(drain_result.as_ref().err(), None);
let (lookup_table_indexes, drained_keys) = drain_result.unwrap();
assert!(drained_keys.is_empty());
assert!(lookup_table_indexes.is_empty());
}
#[test]
fn test_try_drain_keys_found_in_lookup_table_with_empty_table() {
let original_keys = vec![
Pubkey::new_unique(),
Pubkey::new_unique(),
Pubkey::new_unique(),
];
let mut compiled_keys = CompiledKeys {
payer: None,
key_meta_map: BTreeMap::from_iter(
original_keys
.iter()
.map(|key| (*key, CompiledKeyMeta::default())),
),
};
let lookup_table_addresses = vec![];
let drain_result =
compiled_keys.try_drain_keys_found_in_lookup_table(&lookup_table_addresses, |_| true);
assert_eq!(drain_result.as_ref().err(), None);
let (lookup_table_indexes, drained_keys) = drain_result.unwrap();
assert_eq!(compiled_keys.key_meta_map.len(), original_keys.len());
assert!(drained_keys.is_empty());
assert!(lookup_table_indexes.is_empty());
}
#[test]
fn test_try_drain_keys_found_in_lookup_table_with_too_many_addresses() {
let key = Pubkey::new_unique();
let mut compiled_keys = CompiledKeys {
payer: None,
key_meta_map: BTreeMap::from([(key, CompiledKeyMeta::default())]),
};
const MAX_LENGTH_WITHOUT_OVERFLOW: usize = u8::MAX as usize + 1;
let mut lookup_table_addresses = vec![Pubkey::default(); MAX_LENGTH_WITHOUT_OVERFLOW];
lookup_table_addresses.push(key);
let drain_result =
compiled_keys.try_drain_keys_found_in_lookup_table(&lookup_table_addresses, |_| true);
assert_eq!(
drain_result.err(),
Some(CompileError::AddressTableLookupIndexOverflow)
);
}
}
| 34.637346 | 98 | 0.5411 |
1dcf5137e9604b5fefca4e35a4fa994ab4594d5d | 29 | aima.core.agent.AgentProgram
| 14.5 | 28 | 0.862069 |
8739914910b157240b9be251138a861eba1d24c2 | 34,075 | #![allow(unused_imports, non_camel_case_types)]
use crate::models::r4b::Attachment::Attachment;
use crate::models::r4b::CodeableConcept::CodeableConcept;
use crate::models::r4b::ContactPoint::ContactPoint;
use crate::models::r4b::Element::Element;
use crate::models::r4b::Extension::Extension;
use crate::models::r4b::HealthcareService_AvailableTime::HealthcareService_AvailableTime;
use crate::models::r4b::HealthcareService_Eligibility::HealthcareService_Eligibility;
use crate::models::r4b::HealthcareService_NotAvailable::HealthcareService_NotAvailable;
use crate::models::r4b::Identifier::Identifier;
use crate::models::r4b::Meta::Meta;
use crate::models::r4b::Narrative::Narrative;
use crate::models::r4b::Reference::Reference;
use crate::models::r4b::ResourceList::ResourceList;
use serde_json::json;
use serde_json::value::Value;
use std::borrow::Cow;
/// The details of a healthcare service available at a location.
#[derive(Debug)]
pub struct HealthcareService<'a> {
    // Borrowed-or-owned backing JSON; all accessors read lazily from this value.
    pub(crate) value: Cow<'a, Value>,
}
impl HealthcareService<'_> {
    /// Wraps a borrowed JSON value as a `HealthcareService` without copying it.
    pub fn new(value: &Value) -> HealthcareService {
        HealthcareService {
            value: Cow::Borrowed(value),
        }
    }

    /// Returns an owned clone of the backing JSON value.
    pub fn to_json(&self) -> Value {
        (*self.value).clone()
    }

    /// Extensions for active
    pub fn _active(&self) -> Option<Element> {
        if let Some(val) = self.value.get("_active") {
            return Some(Element {
                value: Cow::Borrowed(val),
            });
        }
        return None;
    }

    /// Extensions for appointmentRequired
    pub fn _appointment_required(&self) -> Option<Element> {
        if let Some(val) = self.value.get("_appointmentRequired") {
            return Some(Element {
                value: Cow::Borrowed(val),
            });
        }
        return None;
    }

    /// Extensions for availabilityExceptions
    pub fn _availability_exceptions(&self) -> Option<Element> {
        if let Some(val) = self.value.get("_availabilityExceptions") {
            return Some(Element {
                value: Cow::Borrowed(val),
            });
        }
        return None;
    }

    /// Extensions for comment
    pub fn _comment(&self) -> Option<Element> {
        if let Some(val) = self.value.get("_comment") {
            return Some(Element {
                value: Cow::Borrowed(val),
            });
        }
        return None;
    }

    /// Extensions for extraDetails
    pub fn _extra_details(&self) -> Option<Element> {
        if let Some(val) = self.value.get("_extraDetails") {
            return Some(Element {
                value: Cow::Borrowed(val),
            });
        }
        return None;
    }

    /// Extensions for implicitRules
    pub fn _implicit_rules(&self) -> Option<Element> {
        if let Some(val) = self.value.get("_implicitRules") {
            return Some(Element {
                value: Cow::Borrowed(val),
            });
        }
        return None;
    }

    /// Extensions for language
    pub fn _language(&self) -> Option<Element> {
        if let Some(val) = self.value.get("_language") {
            return Some(Element {
                value: Cow::Borrowed(val),
            });
        }
        return None;
    }

    /// Extensions for name
    pub fn _name(&self) -> Option<Element> {
        if let Some(val) = self.value.get("_name") {
            return Some(Element {
                value: Cow::Borrowed(val),
            });
        }
        return None;
    }

    /// This flag is used to mark the record to not be used. This is not used when a
    /// center is closed for maintenance, or for holidays, the notAvailable period is to
    /// be used for this.
    pub fn active(&self) -> Option<bool> {
        if let Some(val) = self.value.get("active") {
            return Some(val.as_bool().unwrap());
        }
        return None;
    }

    /// Indicates whether or not a prospective consumer will require an appointment for a
    /// particular service at a site to be provided by the Organization. Indicates if an
    /// appointment is required for access to this service.
    pub fn appointment_required(&self) -> Option<bool> {
        if let Some(val) = self.value.get("appointmentRequired") {
            return Some(val.as_bool().unwrap());
        }
        return None;
    }

    /// A description of site availability exceptions, e.g. public holiday availability.
    /// Succinctly describing all possible exceptions to normal site availability as
    /// details in the available Times and not available Times.
    pub fn availability_exceptions(&self) -> Option<&str> {
        if let Some(Value::String(string)) = self.value.get("availabilityExceptions") {
            return Some(string);
        }
        return None;
    }

    /// A collection of times that the Service Site is available.
    pub fn available_time(&self) -> Option<Vec<HealthcareService_AvailableTime>> {
        if let Some(Value::Array(val)) = self.value.get("availableTime") {
            return Some(
                val.into_iter()
                    .map(|e| HealthcareService_AvailableTime {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// Identifies the broad category of service being performed or delivered.
    pub fn category(&self) -> Option<Vec<CodeableConcept>> {
        if let Some(Value::Array(val)) = self.value.get("category") {
            return Some(
                val.into_iter()
                    .map(|e| CodeableConcept {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// Collection of characteristics (attributes).
    pub fn characteristic(&self) -> Option<Vec<CodeableConcept>> {
        if let Some(Value::Array(val)) = self.value.get("characteristic") {
            return Some(
                val.into_iter()
                    .map(|e| CodeableConcept {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// Any additional description of the service and/or any specific issues not covered
    /// by the other attributes, which can be displayed as further detail under the
    /// serviceName.
    pub fn comment(&self) -> Option<&str> {
        if let Some(Value::String(string)) = self.value.get("comment") {
            return Some(string);
        }
        return None;
    }

    /// Some services are specifically made available in multiple languages, this property
    /// permits a directory to declare the languages this is offered in. Typically this is
    /// only provided where a service operates in communities with mixed languages used.
    pub fn communication(&self) -> Option<Vec<CodeableConcept>> {
        if let Some(Value::Array(val)) = self.value.get("communication") {
            return Some(
                val.into_iter()
                    .map(|e| CodeableConcept {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// These resources do not have an independent existence apart from the resource that
    /// contains them - they cannot be identified independently, and nor can they have
    /// their own independent transaction scope.
    pub fn contained(&self) -> Option<Vec<ResourceList>> {
        if let Some(Value::Array(val)) = self.value.get("contained") {
            return Some(
                val.into_iter()
                    .map(|e| ResourceList {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// The location(s) that this service is available to (not where the service is
    /// provided).
    pub fn coverage_area(&self) -> Option<Vec<Reference>> {
        if let Some(Value::Array(val)) = self.value.get("coverageArea") {
            return Some(
                val.into_iter()
                    .map(|e| Reference {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// Does this service have specific eligibility requirements that need to be met in
    /// order to use the service?
    pub fn eligibility(&self) -> Option<Vec<HealthcareService_Eligibility>> {
        if let Some(Value::Array(val)) = self.value.get("eligibility") {
            return Some(
                val.into_iter()
                    .map(|e| HealthcareService_Eligibility {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// Technical endpoints providing access to services operated for the specific
    /// healthcare services defined at this resource.
    pub fn endpoint(&self) -> Option<Vec<Reference>> {
        if let Some(Value::Array(val)) = self.value.get("endpoint") {
            return Some(
                val.into_iter()
                    .map(|e| Reference {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// May be used to represent additional information that is not part of the basic
    /// definition of the resource. To make the use of extensions safe and manageable,
    /// there is a strict set of governance applied to the definition and use of
    /// extensions. Though any implementer can define an extension, there is a set of
    /// requirements that SHALL be met as part of the definition of the extension.
    pub fn extension(&self) -> Option<Vec<Extension>> {
        if let Some(Value::Array(val)) = self.value.get("extension") {
            return Some(
                val.into_iter()
                    .map(|e| Extension {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// Extra details about the service that can't be placed in the other fields.
    pub fn extra_details(&self) -> Option<&str> {
        if let Some(Value::String(string)) = self.value.get("extraDetails") {
            return Some(string);
        }
        return None;
    }

    /// The logical id of the resource, as used in the URL for the resource. Once
    /// assigned, this value never changes.
    pub fn id(&self) -> Option<&str> {
        if let Some(Value::String(string)) = self.value.get("id") {
            return Some(string);
        }
        return None;
    }

    /// External identifiers for this item.
    pub fn identifier(&self) -> Option<Vec<Identifier>> {
        if let Some(Value::Array(val)) = self.value.get("identifier") {
            return Some(
                val.into_iter()
                    .map(|e| Identifier {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// A reference to a set of rules that were followed when the resource was
    /// constructed, and which must be understood when processing the content. Often, this
    /// is a reference to an implementation guide that defines the special rules along
    /// with other profiles etc.
    pub fn implicit_rules(&self) -> Option<&str> {
        if let Some(Value::String(string)) = self.value.get("implicitRules") {
            return Some(string);
        }
        return None;
    }

    /// The base language in which the resource is written.
    pub fn language(&self) -> Option<&str> {
        if let Some(Value::String(string)) = self.value.get("language") {
            return Some(string);
        }
        return None;
    }

    /// The location(s) where this healthcare service may be provided.
    pub fn location(&self) -> Option<Vec<Reference>> {
        if let Some(Value::Array(val)) = self.value.get("location") {
            return Some(
                val.into_iter()
                    .map(|e| Reference {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// The metadata about the resource. This is content that is maintained by the
    /// infrastructure. Changes to the content might not always be associated with version
    /// changes to the resource.
    pub fn meta(&self) -> Option<Meta> {
        if let Some(val) = self.value.get("meta") {
            return Some(Meta {
                value: Cow::Borrowed(val),
            });
        }
        return None;
    }

    /// May be used to represent additional information that is not part of the basic
    /// definition of the resource and that modifies the understanding of the element
    /// that contains it and/or the understanding of the containing element's descendants.
    /// Usually modifier elements provide negation or qualification. To make the use of
    /// extensions safe and manageable, there is a strict set of governance applied to
    /// the definition and use of extensions. Though any implementer is allowed to define
    /// an extension, there is a set of requirements that SHALL be met as part of the
    /// definition of the extension. Applications processing a resource are required to
    /// check for modifier extensions. Modifier extensions SHALL NOT change the meaning
    /// of any elements on Resource or DomainResource (including cannot change the meaning
    /// of modifierExtension itself).
    pub fn modifier_extension(&self) -> Option<Vec<Extension>> {
        if let Some(Value::Array(val)) = self.value.get("modifierExtension") {
            return Some(
                val.into_iter()
                    .map(|e| Extension {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// Further description of the service as it would be presented to a consumer while
    /// searching.
    pub fn name(&self) -> Option<&str> {
        if let Some(Value::String(string)) = self.value.get("name") {
            return Some(string);
        }
        return None;
    }

    /// The HealthcareService is not available during this period of time due to the
    /// provided reason.
    pub fn not_available(&self) -> Option<Vec<HealthcareService_NotAvailable>> {
        if let Some(Value::Array(val)) = self.value.get("notAvailable") {
            return Some(
                val.into_iter()
                    .map(|e| HealthcareService_NotAvailable {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// If there is a photo/symbol associated with this HealthcareService, it may be
    /// included here to facilitate quick identification of the service in a list.
    pub fn photo(&self) -> Option<Attachment> {
        if let Some(val) = self.value.get("photo") {
            return Some(Attachment {
                value: Cow::Borrowed(val),
            });
        }
        return None;
    }

    /// Programs that this service is applicable to.
    pub fn program(&self) -> Option<Vec<CodeableConcept>> {
        if let Some(Value::Array(val)) = self.value.get("program") {
            return Some(
                val.into_iter()
                    .map(|e| CodeableConcept {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// The organization that provides this healthcare service.
    pub fn provided_by(&self) -> Option<Reference> {
        if let Some(val) = self.value.get("providedBy") {
            return Some(Reference {
                value: Cow::Borrowed(val),
            });
        }
        return None;
    }

    /// Ways that the service accepts referrals, if this is not provided then it is
    /// implied that no referral is required.
    pub fn referral_method(&self) -> Option<Vec<CodeableConcept>> {
        if let Some(Value::Array(val)) = self.value.get("referralMethod") {
            return Some(
                val.into_iter()
                    .map(|e| CodeableConcept {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// The code(s) that detail the conditions under which the healthcare service is
    /// available/offered.
    pub fn service_provision_code(&self) -> Option<Vec<CodeableConcept>> {
        if let Some(Value::Array(val)) = self.value.get("serviceProvisionCode") {
            return Some(
                val.into_iter()
                    .map(|e| CodeableConcept {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// Collection of specialties handled by the service site. This is more of a medical
    /// term.
    pub fn specialty(&self) -> Option<Vec<CodeableConcept>> {
        if let Some(Value::Array(val)) = self.value.get("specialty") {
            return Some(
                val.into_iter()
                    .map(|e| CodeableConcept {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// List of contacts related to this specific healthcare service.
    pub fn telecom(&self) -> Option<Vec<ContactPoint>> {
        if let Some(Value::Array(val)) = self.value.get("telecom") {
            return Some(
                val.into_iter()
                    .map(|e| ContactPoint {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// A human-readable narrative that contains a summary of the resource and can be used
    /// to represent the content of the resource to a human. The narrative need not encode
    /// all the structured data, but is required to contain sufficient detail to make it
    /// "clinically safe" for a human to just read the narrative. Resource definitions
    /// may define what content should be represented in the narrative to ensure clinical
    /// safety.
    pub fn text(&self) -> Option<Narrative> {
        if let Some(val) = self.value.get("text") {
            return Some(Narrative {
                value: Cow::Borrowed(val),
            });
        }
        return None;
    }

    /// The specific type of service that may be delivered or performed.
    // named `fhir_type` because `type` is a reserved word in Rust
    pub fn fhir_type(&self) -> Option<Vec<CodeableConcept>> {
        if let Some(Value::Array(val)) = self.value.get("type") {
            return Some(
                val.into_iter()
                    .map(|e| CodeableConcept {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// Validates the resource by recursively validating every field that is present;
    /// returns false as soon as any child element fails validation.
    pub fn validate(&self) -> bool {
        if let Some(_val) = self._active() {
            if !_val.validate() {
                return false;
            }
        }
        if let Some(_val) = self._appointment_required() {
            if !_val.validate() {
                return false;
            }
        }
        if let Some(_val) = self._availability_exceptions() {
            if !_val.validate() {
                return false;
            }
        }
        if let Some(_val) = self._comment() {
            if !_val.validate() {
                return false;
            }
        }
        if let Some(_val) = self._extra_details() {
            if !_val.validate() {
                return false;
            }
        }
        if let Some(_val) = self._implicit_rules() {
            if !_val.validate() {
                return false;
            }
        }
        if let Some(_val) = self._language() {
            if !_val.validate() {
                return false;
            }
        }
        if let Some(_val) = self._name() {
            if !_val.validate() {
                return false;
            }
        }
        if let Some(_val) = self.active() {}
        if let Some(_val) = self.appointment_required() {}
        if let Some(_val) = self.availability_exceptions() {}
        if let Some(_val) = self.available_time() {
            if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
                return false;
            }
        }
        if let Some(_val) = self.category() {
            if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
                return false;
            }
        }
        if let Some(_val) = self.characteristic() {
            if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
                return false;
            }
        }
        if let Some(_val) = self.comment() {}
        if let Some(_val) = self.communication() {
            if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
                return false;
            }
        }
        if let Some(_val) = self.contained() {
            if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
                return false;
            }
        }
        if let Some(_val) = self.coverage_area() {
            if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
                return false;
            }
        }
        if let Some(_val) = self.eligibility() {
            if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
                return false;
            }
        }
        if let Some(_val) = self.endpoint() {
            if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
                return false;
            }
        }
        if let Some(_val) = self.extension() {
            if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
                return false;
            }
        }
        if let Some(_val) = self.extra_details() {}
        if let Some(_val) = self.id() {}
        if let Some(_val) = self.identifier() {
            if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
                return false;
            }
        }
        if let Some(_val) = self.implicit_rules() {}
        if let Some(_val) = self.language() {}
        if let Some(_val) = self.location() {
            if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
                return false;
            }
        }
        if let Some(_val) = self.meta() {
            if !_val.validate() {
                return false;
            }
        }
        if let Some(_val) = self.modifier_extension() {
            if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
                return false;
            }
        }
        if let Some(_val) = self.name() {}
        if let Some(_val) = self.not_available() {
            if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
                return false;
            }
        }
        if let Some(_val) = self.photo() {
            if !_val.validate() {
                return false;
            }
        }
        if let Some(_val) = self.program() {
            if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
                return false;
            }
        }
        if let Some(_val) = self.provided_by() {
            if !_val.validate() {
                return false;
            }
        }
        if let Some(_val) = self.referral_method() {
            if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
                return false;
            }
        }
        if let Some(_val) = self.service_provision_code() {
            if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
                return false;
            }
        }
        if let Some(_val) = self.specialty() {
            if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
                return false;
            }
        }
        if let Some(_val) = self.telecom() {
            if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
                return false;
            }
        }
        if let Some(_val) = self.text() {
            if !_val.validate() {
                return false;
            }
        }
        if let Some(_val) = self.fhir_type() {
            if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
                return false;
            }
        }
        return true;
    }
}
#[derive(Debug)]
pub struct HealthcareServiceBuilder {
    // Owned JSON object accumulated by the fluent setters below.
    pub(crate) value: Value,
}
impl HealthcareServiceBuilder {
    /// Finalizes the builder, cloning the accumulated JSON into an owned resource.
    pub fn build(&self) -> HealthcareService {
        HealthcareService {
            value: Cow::Owned(self.value.clone()),
        }
    }

    /// Seeds a builder from an existing resource so individual fields can be overridden.
    pub fn with(existing: HealthcareService) -> HealthcareServiceBuilder {
        HealthcareServiceBuilder {
            value: (*existing.value).clone(),
        }
    }

    /// Creates a builder over an empty JSON object.
    pub fn new() -> HealthcareServiceBuilder {
        let mut __value: Value = json!({});
        return HealthcareServiceBuilder { value: __value };
    }

    // Fluent setters: each stores `val` under the matching FHIR JSON property
    // name and returns `&mut self` so calls can be chained.
    pub fn _active<'a>(&'a mut self, val: Element) -> &'a mut HealthcareServiceBuilder {
        self.value["_active"] = json!(val.value);
        return self;
    }

    pub fn _appointment_required<'a>(
        &'a mut self,
        val: Element,
    ) -> &'a mut HealthcareServiceBuilder {
        self.value["_appointmentRequired"] = json!(val.value);
        return self;
    }

    pub fn _availability_exceptions<'a>(
        &'a mut self,
        val: Element,
    ) -> &'a mut HealthcareServiceBuilder {
        self.value["_availabilityExceptions"] = json!(val.value);
        return self;
    }

    pub fn _comment<'a>(&'a mut self, val: Element) -> &'a mut HealthcareServiceBuilder {
        self.value["_comment"] = json!(val.value);
        return self;
    }

    pub fn _extra_details<'a>(&'a mut self, val: Element) -> &'a mut HealthcareServiceBuilder {
        self.value["_extraDetails"] = json!(val.value);
        return self;
    }

    pub fn _implicit_rules<'a>(&'a mut self, val: Element) -> &'a mut HealthcareServiceBuilder {
        self.value["_implicitRules"] = json!(val.value);
        return self;
    }

    pub fn _language<'a>(&'a mut self, val: Element) -> &'a mut HealthcareServiceBuilder {
        self.value["_language"] = json!(val.value);
        return self;
    }

    pub fn _name<'a>(&'a mut self, val: Element) -> &'a mut HealthcareServiceBuilder {
        self.value["_name"] = json!(val.value);
        return self;
    }

    pub fn active<'a>(&'a mut self, val: bool) -> &'a mut HealthcareServiceBuilder {
        self.value["active"] = json!(val);
        return self;
    }

    pub fn appointment_required<'a>(&'a mut self, val: bool) -> &'a mut HealthcareServiceBuilder {
        self.value["appointmentRequired"] = json!(val);
        return self;
    }

    pub fn availability_exceptions<'a>(
        &'a mut self,
        val: &str,
    ) -> &'a mut HealthcareServiceBuilder {
        self.value["availabilityExceptions"] = json!(val);
        return self;
    }

    pub fn available_time<'a>(
        &'a mut self,
        val: Vec<HealthcareService_AvailableTime>,
    ) -> &'a mut HealthcareServiceBuilder {
        self.value["availableTime"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }

    pub fn category<'a>(
        &'a mut self,
        val: Vec<CodeableConcept>,
    ) -> &'a mut HealthcareServiceBuilder {
        self.value["category"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }

    pub fn characteristic<'a>(
        &'a mut self,
        val: Vec<CodeableConcept>,
    ) -> &'a mut HealthcareServiceBuilder {
        self.value["characteristic"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }

    pub fn comment<'a>(&'a mut self, val: &str) -> &'a mut HealthcareServiceBuilder {
        self.value["comment"] = json!(val);
        return self;
    }

    pub fn communication<'a>(
        &'a mut self,
        val: Vec<CodeableConcept>,
    ) -> &'a mut HealthcareServiceBuilder {
        self.value["communication"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }

    pub fn contained<'a>(&'a mut self, val: Vec<ResourceList>) -> &'a mut HealthcareServiceBuilder {
        self.value["contained"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }

    pub fn coverage_area<'a>(
        &'a mut self,
        val: Vec<Reference>,
    ) -> &'a mut HealthcareServiceBuilder {
        self.value["coverageArea"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }

    pub fn eligibility<'a>(
        &'a mut self,
        val: Vec<HealthcareService_Eligibility>,
    ) -> &'a mut HealthcareServiceBuilder {
        self.value["eligibility"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }

    pub fn endpoint<'a>(&'a mut self, val: Vec<Reference>) -> &'a mut HealthcareServiceBuilder {
        self.value["endpoint"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }

    pub fn extension<'a>(&'a mut self, val: Vec<Extension>) -> &'a mut HealthcareServiceBuilder {
        self.value["extension"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }

    pub fn extra_details<'a>(&'a mut self, val: &str) -> &'a mut HealthcareServiceBuilder {
        self.value["extraDetails"] = json!(val);
        return self;
    }

    pub fn id<'a>(&'a mut self, val: &str) -> &'a mut HealthcareServiceBuilder {
        self.value["id"] = json!(val);
        return self;
    }

    pub fn identifier<'a>(&'a mut self, val: Vec<Identifier>) -> &'a mut HealthcareServiceBuilder {
        self.value["identifier"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }

    pub fn implicit_rules<'a>(&'a mut self, val: &str) -> &'a mut HealthcareServiceBuilder {
        self.value["implicitRules"] = json!(val);
        return self;
    }

    pub fn language<'a>(&'a mut self, val: &str) -> &'a mut HealthcareServiceBuilder {
        self.value["language"] = json!(val);
        return self;
    }

    pub fn location<'a>(&'a mut self, val: Vec<Reference>) -> &'a mut HealthcareServiceBuilder {
        self.value["location"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }

    pub fn meta<'a>(&'a mut self, val: Meta) -> &'a mut HealthcareServiceBuilder {
        self.value["meta"] = json!(val.value);
        return self;
    }

    pub fn modifier_extension<'a>(
        &'a mut self,
        val: Vec<Extension>,
    ) -> &'a mut HealthcareServiceBuilder {
        self.value["modifierExtension"] =
            json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }

    pub fn name<'a>(&'a mut self, val: &str) -> &'a mut HealthcareServiceBuilder {
        self.value["name"] = json!(val);
        return self;
    }

    pub fn not_available<'a>(
        &'a mut self,
        val: Vec<HealthcareService_NotAvailable>,
    ) -> &'a mut HealthcareServiceBuilder {
        self.value["notAvailable"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }

    pub fn photo<'a>(&'a mut self, val: Attachment) -> &'a mut HealthcareServiceBuilder {
        self.value["photo"] = json!(val.value);
        return self;
    }

    pub fn program<'a>(
        &'a mut self,
        val: Vec<CodeableConcept>,
    ) -> &'a mut HealthcareServiceBuilder {
        self.value["program"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }

    pub fn provided_by<'a>(&'a mut self, val: Reference) -> &'a mut HealthcareServiceBuilder {
        self.value["providedBy"] = json!(val.value);
        return self;
    }

    pub fn referral_method<'a>(
        &'a mut self,
        val: Vec<CodeableConcept>,
    ) -> &'a mut HealthcareServiceBuilder {
        self.value["referralMethod"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }

    pub fn service_provision_code<'a>(
        &'a mut self,
        val: Vec<CodeableConcept>,
    ) -> &'a mut HealthcareServiceBuilder {
        self.value["serviceProvisionCode"] =
            json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }

    pub fn specialty<'a>(
        &'a mut self,
        val: Vec<CodeableConcept>,
    ) -> &'a mut HealthcareServiceBuilder {
        self.value["specialty"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }

    pub fn telecom<'a>(&'a mut self, val: Vec<ContactPoint>) -> &'a mut HealthcareServiceBuilder {
        self.value["telecom"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }

    pub fn text<'a>(&'a mut self, val: Narrative) -> &'a mut HealthcareServiceBuilder {
        self.value["text"] = json!(val.value);
        return self;
    }

    // named `fhir_type` because `type` is a reserved word in Rust
    pub fn fhir_type<'a>(
        &'a mut self,
        val: Vec<CodeableConcept>,
    ) -> &'a mut HealthcareServiceBuilder {
        self.value["type"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }
}
| 34.629065 | 100 | 0.534996 |
c1e9cd31d6a3a08027f2607d9c2008a43cb55656 | 3,258 | use crate::{mock::*, Error};
use frame_support::{assert_noop, assert_ok};
use super::*;
#[test]
fn create_claim_works() {
    new_test_ext().execute_with(|| {
        let proof = vec![0, 1];
        assert_ok!(PoeModule::create_claim(Origin::signed(1), proof.clone()));
        // Storage must now record owner 1 at the current block number.
        let expected = Some((1, frame_system::Pallet::<Test>::block_number()));
        assert_eq!(Proofs::<Test>::get(&proof), expected);
    })
}
#[test]
fn create_claim_failed_when_exceeding_size() {
    new_test_ext().execute_with(|| {
        // Eight bytes exceeds the claim-size limit configured in the mock.
        let oversized = vec![0, 1, 2, 3, 4, 5, 6, 7];
        assert_noop!(
            PoeModule::create_claim(Origin::signed(1), oversized),
            Error::<Test>::ClaimSizeExceeded
        );
    })
}
#[test]
fn create_claim_failed_when_claim_already_exist() {
    new_test_ext().execute_with(|| {
        let proof = vec![0, 1];
        // First creation succeeds; a duplicate claim must be rejected.
        let _ = PoeModule::create_claim(Origin::signed(1), proof.clone());
        assert_noop!(
            PoeModule::create_claim(Origin::signed(1), proof),
            Error::<Test>::ProofAlreadyExist
        );
    })
}
#[test]
fn revoke_claim_works() {
    new_test_ext().execute_with(|| {
        let proof = vec![0, 1];
        let _ = PoeModule::create_claim(Origin::signed(1), proof.clone());
        assert_ok!(PoeModule::revoke_claim(Origin::signed(1), proof.clone()));
        // Revocation removes the proof from storage entirely.
        assert_eq!(Proofs::<Test>::get(&proof), None);
    })
}
#[test]
fn revoke_claim_failed_when_claim_is_not_exist() {
    new_test_ext().execute_with(|| {
        // Nothing was ever created, so revoking must fail.
        let missing = vec![0, 1];
        assert_noop!(
            PoeModule::revoke_claim(Origin::signed(1), missing),
            Error::<Test>::ClaimNotExist
        );
    })
}
#[test]
fn revoke_claim_failed_when_sender_is_not_owner() {
    new_test_ext().execute_with(|| {
        let proof = vec![0, 1];
        // Account 1 owns the claim; account 2 may not revoke it.
        let _ = PoeModule::create_claim(Origin::signed(1), proof.clone());
        assert_noop!(
            PoeModule::revoke_claim(Origin::signed(2), proof),
            Error::<Test>::NotClaimOwner
        );
    })
}
#[test]
fn transfer_claim_works() {
    new_test_ext().execute_with(|| {
        let proof = vec![0, 1];
        let _ = PoeModule::create_claim(Origin::signed(1), proof.clone());
        assert_ok!(PoeModule::transfer_claim(Origin::signed(1), proof.clone(), 2));
        // Ownership must now be recorded for account 2.
        let expected = Some((2, frame_system::Pallet::<Test>::block_number()));
        assert_eq!(Proofs::<Test>::get(&proof), expected);
    })
}
#[test]
fn transfer_claim_failed_when_claim_not_exist() {
    new_test_ext().execute_with(|| {
        let existing = vec![0, 1];
        let _ = PoeModule::create_claim(Origin::signed(1), existing);
        // A different, never-created claim cannot be transferred.
        let missing = vec![1, 2];
        assert_noop!(
            PoeModule::transfer_claim(Origin::signed(1), missing, 2),
            Error::<Test>::ClaimNotExist
        );
    })
}
#[test]
fn transfer_claim_failed_when_sender_is_not_claim_owner() {
    new_test_ext().execute_with(|| {
        let proof = vec![0, 1];
        // Account 1 owns the claim; account 2 may not transfer it.
        let _ = PoeModule::create_claim(Origin::signed(1), proof.clone());
        assert_noop!(
            PoeModule::transfer_claim(Origin::signed(2), proof, 2),
            Error::<Test>::NotClaimOwner
        );
    })
}
| 26.704918 | 93 | 0.586863 |
bf70d7a173e7aea5fedec38ce0de1499db33d47d | 161 | use crate::decode::Decoder;
impl<'a, 'b: 'a> Decoder<'a, 'b> {
    // Macro-generated decoders for vector-valued resource records; each
    // invocation presumably expands to a decode method for the named record
    // type (EID / NIMLOC) backed by the given `rr_*` parser over the record's
    // `data` payload — see the `impl_decode_rr_vec!` definition to confirm.
    impl_decode_rr_vec!(EID, data, rr_eid);
    impl_decode_rr_vec!(NIMLOC, data, rr_nimloc);
}
| 20.125 | 49 | 0.652174 |
489a6f5d683d434b8dbb3125758d93bffab01552 | 4,642 | use super::space_from_ty::{space_can_decompose, space_decompose};
use super::*;
/// スペースからスペースを引く。
/// Subtracts `second` from `first`, returning the space of patterns covered by
/// `first` but not by `second`. Used for pattern-match exhaustiveness checks.
///
/// Idiom cleanup vs. the previous version: `std::mem::replace(x, Default)` is
/// written as `std::mem::take(x)`, and cloning a `String` uses `clone()`
/// instead of `to_string()`. The match logic itself is unchanged.
pub(crate) fn space_subtraction(mut first: Space, mut second: Space, td: &TyDatabase) -> Space {
    // Subtracting anything from the empty space yields the empty space.
    if first.is_empty() {
        return Space::new_empty();
    }
    // Subtracting the empty space changes nothing.
    if second.is_empty() {
        return first;
    }
    match (&mut first, &mut second) {
        // A subtype space minus a supertype space is empty.
        (Space::Ty(ref subty), Space::Ty(ref super_ty)) if td.is_subtype_of(subty, super_ty) => {
            Space::new_empty()
        }
        // Subtract a constructor space from its owning type's space:
        // just decompose the left-hand type into constructor spaces.
        (Space::Ty(ref mut ty), Space::Constructor { ref name, .. })
            if ty.is_constructor_of_name(name) =>
        {
            let ty = std::mem::take(ty);
            let first = space_decompose(Space::Ty(ty), td);
            space_subtraction(first, second, td)
        }
        // Distribute over a union on the left:
        // (x | y) \ z = x \ z | y \ z
        (Space::Union(ref mut union), _) => {
            let union = std::mem::take(union);
            Space::new_union(
                union
                    .into_iter()
                    .map(|subspace| space_subtraction(subspace, second.clone(), td)),
            )
        }
        // x \ (y | z) = x \ y \ z
        (_, Space::Union(ref mut union)) => {
            let union = std::mem::take(union);
            union
                .into_iter()
                .fold(first, |first, second| space_subtraction(first, second, td))
        }
        // A constructor space minus the space of the type containing that
        // constructor is empty.
        (Space::Constructor { ref name, .. }, Space::Ty(ref ty))
            if ty.is_constructor_of_name(name) =>
        {
            Space::new_empty()
        }
        // Subtract two spaces built from the same constructor.
        (
            Space::Constructor {
                ref mut name,
                args: ref mut first_args,
            },
            Space::Constructor {
                name: ref second_name,
                args: ref mut second_args,
            },
        ) if name == second_name => {
            let name = std::mem::take(name);
            let first_args = std::mem::take(first_args);
            let second_args = std::mem::take(second_args);
            debug_assert_eq!(
                first_args.len(),
                second_args.len(),
                "同じコンストラクタの引数の個数は一致するはず"
            );
            // If every argument position is fully covered, the result is
            // empty. (A fast path for a special case of the general expansion
            // below.)
            let all_are_covered =
                first_args
                    .iter()
                    .zip(second_args.iter())
                    .all(|(first, second)| {
                        let leak = space_subtraction(first.clone(), second.clone(), td);
                        leak.is_empty()
                    });
            if all_are_covered {
                return Space::new_empty();
            }
            // If any argument space is empty, the whole constructor space is
            // empty. (An orthogonality check for argument spaces is still
            // missing.)
            // FIXME: implement the orthogonality check
            let any_is_empty = first_args.iter().any(|arg| arg.is_empty());
            if any_is_empty {
                return Space::new_empty();
            }
            // Build the union of spaces where exactly one argument position
            // takes the difference and every other position is kept as-is.
            // E.g. for type (bool, bool) with case (true, false), the
            // remainder is ".0 is not true" OR ".1 is not false": "not" is the
            // subtraction, "or" is the union.
            let mut spaces = vec![];
            for t in 0..first_args.len() {
                let mut args = vec![];
                for i in 0..first_args.len() {
                    args.push(if i == t {
                        space_subtraction(first_args[i].clone(), second_args[i].clone(), td)
                    } else {
                        first_args[i].clone()
                    });
                }
                spaces.push(Space::Constructor {
                    name: name.clone(),
                    args,
                });
            }
            Space::new_union(spaces)
        }
        // Decompose a decomposable type space and retry the subtraction.
        (&mut ref s, _) if space_can_decompose(s, td) => {
            let first = space_decompose(first, td);
            space_subtraction(first, second, td)
        }
        (_, &mut ref s) if space_can_decompose(s, td) => {
            let second = space_decompose(second, td);
            space_subtraction(first, second, td)
        }
        _ => first,
    }
}
| 33.637681 | 97 | 0.482766 |
21b2a30f2b4859c701e94b3f8eaf6d9aa87b1ab5 | 10,299 | #[doc = "Reader of register DSEQCTRL"]
pub type R = crate::R<u32, super::DSEQCTRL>;
#[doc = "Writer for register DSEQCTRL"]
pub type W = crate::W<u32, super::DSEQCTRL>;
#[doc = "Register DSEQCTRL `reset()`'s with value 0"]
impl crate::ResetValue for super::DSEQCTRL {
    type Type = u32;
    // The register comes up all-zeroes after reset: every field bit cleared.
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `INPUTCTRL`"]
pub type INPUTCTRL_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `INPUTCTRL`"]
pub struct INPUTCTRL_W<'a> {
    w: &'a mut W,
}
impl<'a> INPUTCTRL_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        // Same effect as `self.bit(true)`.
        self.w.bits |= 0x01;
        self.w
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        // Same effect as `self.bit(false)`.
        self.w.bits &= !0x01;
        self.w
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 0, then insert `value` at that position.
        let mask = 0x01u32;
        self.w.bits &= !mask;
        self.w.bits |= (value as u32) & 0x01;
        self.w
    }
}
#[doc = "Reader of field `CTRLB`"]
pub type CTRLB_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `CTRLB`"]
pub struct CTRLB_W<'a> {
    w: &'a mut W,
}
impl<'a> CTRLB_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        // Same effect as `self.bit(true)`.
        self.w.bits |= 0x01 << 1;
        self.w
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        // Same effect as `self.bit(false)`.
        self.w.bits &= !(0x01 << 1);
        self.w
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 1, then insert `value` at that position.
        let mask = 0x01u32 << 1;
        self.w.bits &= !mask;
        self.w.bits |= ((value as u32) & 0x01) << 1;
        self.w
    }
}
#[doc = "Reader of field `REFCTRL`"]
pub type REFCTRL_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `REFCTRL`"]
pub struct REFCTRL_W<'a> {
    w: &'a mut W,
}
impl<'a> REFCTRL_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        // Same effect as `self.bit(true)`.
        self.w.bits |= 0x01 << 2;
        self.w
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        // Same effect as `self.bit(false)`.
        self.w.bits &= !(0x01 << 2);
        self.w
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 2, then insert `value` at that position.
        let mask = 0x01u32 << 2;
        self.w.bits &= !mask;
        self.w.bits |= ((value as u32) & 0x01) << 2;
        self.w
    }
}
#[doc = "Reader of field `AVGCTRL`"]
pub type AVGCTRL_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `AVGCTRL`"]
pub struct AVGCTRL_W<'a> {
    w: &'a mut W,
}
impl<'a> AVGCTRL_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        // Same effect as `self.bit(true)`.
        self.w.bits |= 0x01 << 3;
        self.w
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        // Same effect as `self.bit(false)`.
        self.w.bits &= !(0x01 << 3);
        self.w
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 3, then insert `value` at that position.
        let mask = 0x01u32 << 3;
        self.w.bits &= !mask;
        self.w.bits |= ((value as u32) & 0x01) << 3;
        self.w
    }
}
#[doc = "Reader of field `SAMPCTRL`"]
pub type SAMPCTRL_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SAMPCTRL`"]
pub struct SAMPCTRL_W<'a> {
    w: &'a mut W,
}
impl<'a> SAMPCTRL_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        // Same effect as `self.bit(true)`.
        self.w.bits |= 0x01 << 4;
        self.w
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        // Same effect as `self.bit(false)`.
        self.w.bits &= !(0x01 << 4);
        self.w
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 4, then insert `value` at that position.
        let mask = 0x01u32 << 4;
        self.w.bits &= !mask;
        self.w.bits |= ((value as u32) & 0x01) << 4;
        self.w
    }
}
#[doc = "Reader of field `WINLT`"]
pub type WINLT_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `WINLT`"]
pub struct WINLT_W<'a> {
    w: &'a mut W,
}
impl<'a> WINLT_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        // Same effect as `self.bit(true)`.
        self.w.bits |= 0x01 << 5;
        self.w
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        // Same effect as `self.bit(false)`.
        self.w.bits &= !(0x01 << 5);
        self.w
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 5, then insert `value` at that position.
        let mask = 0x01u32 << 5;
        self.w.bits &= !mask;
        self.w.bits |= ((value as u32) & 0x01) << 5;
        self.w
    }
}
#[doc = "Reader of field `WINUT`"]
pub type WINUT_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `WINUT`"]
pub struct WINUT_W<'a> {
    w: &'a mut W,
}
impl<'a> WINUT_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        // Same effect as `self.bit(true)`.
        self.w.bits |= 0x01 << 6;
        self.w
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        // Same effect as `self.bit(false)`.
        self.w.bits &= !(0x01 << 6);
        self.w
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 6, then insert `value` at that position.
        let mask = 0x01u32 << 6;
        self.w.bits &= !mask;
        self.w.bits |= ((value as u32) & 0x01) << 6;
        self.w
    }
}
#[doc = "Reader of field `GAINCORR`"]
pub type GAINCORR_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `GAINCORR`"]
pub struct GAINCORR_W<'a> {
    w: &'a mut W,
}
impl<'a> GAINCORR_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        // Same effect as `self.bit(true)`.
        self.w.bits |= 0x01 << 7;
        self.w
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        // Same effect as `self.bit(false)`.
        self.w.bits &= !(0x01 << 7);
        self.w
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 7, then insert `value` at that position.
        let mask = 0x01u32 << 7;
        self.w.bits &= !mask;
        self.w.bits |= ((value as u32) & 0x01) << 7;
        self.w
    }
}
#[doc = "Reader of field `OFFSETCORR`"]
pub type OFFSETCORR_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `OFFSETCORR`"]
pub struct OFFSETCORR_W<'a> {
    w: &'a mut W,
}
impl<'a> OFFSETCORR_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        // Same effect as `self.bit(true)`.
        self.w.bits |= 0x01 << 8;
        self.w
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        // Same effect as `self.bit(false)`.
        self.w.bits &= !(0x01 << 8);
        self.w
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 8, then insert `value` at that position.
        let mask = 0x01u32 << 8;
        self.w.bits &= !mask;
        self.w.bits |= ((value as u32) & 0x01) << 8;
        self.w
    }
}
#[doc = "Reader of field `AUTOSTART`"]
pub type AUTOSTART_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `AUTOSTART`"]
pub struct AUTOSTART_W<'a> {
    w: &'a mut W,
}
impl<'a> AUTOSTART_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        // Same effect as `self.bit(true)`.
        self.w.bits |= 0x01u32 << 31;
        self.w
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        // Same effect as `self.bit(false)`.
        self.w.bits &= !(0x01u32 << 31);
        self.w
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 31, then insert `value` at that position.
        let mask = 0x01u32 << 31;
        self.w.bits &= !mask;
        self.w.bits |= ((value as u32) & 0x01) << 31;
        self.w
    }
}
impl R {
    // Each reader extracts one bit by masking; `(bits & mask) == mask` is
    // equivalent to the shift-and-test form for a single-bit mask.
    #[doc = "Bit 0 - Input Control"]
    #[inline(always)]
    pub fn inputctrl(&self) -> INPUTCTRL_R {
        let mask = 0x01u32;
        INPUTCTRL_R::new((self.bits & mask) == mask)
    }
    #[doc = "Bit 1 - Control B"]
    #[inline(always)]
    pub fn ctrlb(&self) -> CTRLB_R {
        let mask = 0x01u32 << 1;
        CTRLB_R::new((self.bits & mask) == mask)
    }
    #[doc = "Bit 2 - Reference Control"]
    #[inline(always)]
    pub fn refctrl(&self) -> REFCTRL_R {
        let mask = 0x01u32 << 2;
        REFCTRL_R::new((self.bits & mask) == mask)
    }
    #[doc = "Bit 3 - Average Control"]
    #[inline(always)]
    pub fn avgctrl(&self) -> AVGCTRL_R {
        let mask = 0x01u32 << 3;
        AVGCTRL_R::new((self.bits & mask) == mask)
    }
    #[doc = "Bit 4 - Sampling Time Control"]
    #[inline(always)]
    pub fn sampctrl(&self) -> SAMPCTRL_R {
        let mask = 0x01u32 << 4;
        SAMPCTRL_R::new((self.bits & mask) == mask)
    }
    #[doc = "Bit 5 - Window Monitor Lower Threshold"]
    #[inline(always)]
    pub fn winlt(&self) -> WINLT_R {
        let mask = 0x01u32 << 5;
        WINLT_R::new((self.bits & mask) == mask)
    }
    #[doc = "Bit 6 - Window Monitor Upper Threshold"]
    #[inline(always)]
    pub fn winut(&self) -> WINUT_R {
        let mask = 0x01u32 << 6;
        WINUT_R::new((self.bits & mask) == mask)
    }
    #[doc = "Bit 7 - Gain Correction"]
    #[inline(always)]
    pub fn gaincorr(&self) -> GAINCORR_R {
        let mask = 0x01u32 << 7;
        GAINCORR_R::new((self.bits & mask) == mask)
    }
    #[doc = "Bit 8 - Offset Correction"]
    #[inline(always)]
    pub fn offsetcorr(&self) -> OFFSETCORR_R {
        let mask = 0x01u32 << 8;
        OFFSETCORR_R::new((self.bits & mask) == mask)
    }
    #[doc = "Bit 31 - ADC Auto-Start Conversion"]
    #[inline(always)]
    pub fn autostart(&self) -> AUTOSTART_R {
        let mask = 0x01u32 << 31;
        AUTOSTART_R::new((self.bits & mask) == mask)
    }
}
impl W {
    // Each accessor returns a one-shot write proxy borrowing this W; the
    // proxy's `bit`/`set_bit`/`clear_bit` methods modify `bits` and hand back
    // `&mut W` for chaining.
    #[doc = "Bit 0 - Input Control"]
    #[inline(always)]
    pub fn inputctrl(&mut self) -> INPUTCTRL_W {
        INPUTCTRL_W { w: self }
    }
    #[doc = "Bit 1 - Control B"]
    #[inline(always)]
    pub fn ctrlb(&mut self) -> CTRLB_W {
        CTRLB_W { w: self }
    }
    #[doc = "Bit 2 - Reference Control"]
    #[inline(always)]
    pub fn refctrl(&mut self) -> REFCTRL_W {
        REFCTRL_W { w: self }
    }
    #[doc = "Bit 3 - Average Control"]
    #[inline(always)]
    pub fn avgctrl(&mut self) -> AVGCTRL_W {
        AVGCTRL_W { w: self }
    }
    #[doc = "Bit 4 - Sampling Time Control"]
    #[inline(always)]
    pub fn sampctrl(&mut self) -> SAMPCTRL_W {
        SAMPCTRL_W { w: self }
    }
    #[doc = "Bit 5 - Window Monitor Lower Threshold"]
    #[inline(always)]
    pub fn winlt(&mut self) -> WINLT_W {
        WINLT_W { w: self }
    }
    #[doc = "Bit 6 - Window Monitor Upper Threshold"]
    #[inline(always)]
    pub fn winut(&mut self) -> WINUT_W {
        WINUT_W { w: self }
    }
    #[doc = "Bit 7 - Gain Correction"]
    #[inline(always)]
    pub fn gaincorr(&mut self) -> GAINCORR_W {
        GAINCORR_W { w: self }
    }
    #[doc = "Bit 8 - Offset Correction"]
    #[inline(always)]
    pub fn offsetcorr(&mut self) -> OFFSETCORR_W {
        OFFSETCORR_W { w: self }
    }
    #[doc = "Bit 31 - ADC Auto-Start Conversion"]
    #[inline(always)]
    pub fn autostart(&mut self) -> AUTOSTART_W {
        AUTOSTART_W { w: self }
    }
}
| 28.848739 | 86 | 0.537431 |
e580c465e6e72ea01598a5d4c8df4f9fb706aedc | 228 | mod agents;
mod managed_devices;
#[cfg_attr(feature = "integration-tests", visibility::make(pub))]
pub(crate) use agents::*;
#[cfg_attr(feature = "integration-tests", visibility::make(pub))]
pub(crate) use managed_devices::*;
| 25.333333 | 65 | 0.72807 |
39159b345fc13a7576377f71c107f2bfed3af919 | 96 | #![feature(type_alias_impl_trait)]
pub mod monitor;
pub mod window;
pub mod x_windows_monitor;
| 16 | 34 | 0.791667 |
0ee46b4ce789a9610f6630f61d90e5902b47af3b | 38,215 | use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering;
use super::Conductor;
use super::ConductorState;
use super::*;
use crate::conductor::api::error::ConductorApiError;
use crate::core::ribosome::guest_callback::validate::ValidateResult;
use crate::sweettest::*;
use crate::test_utils::fake_valid_dna_file;
use crate::{
assert_eq_retry_10s, core::ribosome::guest_callback::genesis_self_check::GenesisSelfCheckResult,
};
use ::fixt::prelude::*;
use holochain_conductor_api::InstalledAppInfoStatus;
use holochain_conductor_api::{AdminRequest, AdminResponse, AppRequest, AppResponse, ZomeCall};
use holochain_keystore::crude_mock_keystore::spawn_crude_mock_keystore;
use holochain_state::prelude::*;
use holochain_types::test_utils::fake_cell_id;
use holochain_wasm_test_utils::TestWasm;
use holochain_websocket::WebsocketSender;
use kitsune_p2p_types::dependencies::lair_keystore_api_0_0::LairError;
use maplit::hashset;
use matches::assert_matches;
#[tokio::test(flavor = "multi_thread")]
async fn can_update_state() {
    let envs = test_environments();
    let dna_store = MockDnaStore::new();
    let keystore = envs.conductor().keystore().clone();
    let holochain_p2p = holochain_p2p::stub_network().await;
    let (post_commit_sender, _post_commit_receiver) =
        tokio::sync::mpsc::channel(POST_COMMIT_CHANNEL_BOUND);
    // Build a bare Conductor over test environments and a stub network.
    let conductor = Conductor::new(
        envs.conductor(),
        envs.wasm(),
        dna_store,
        keystore,
        envs.path().to_path_buf().into(),
        holochain_p2p,
        DbSyncLevel::default(),
        post_commit_sender,
    )
    .await
    .unwrap();
    // A fresh conductor starts from the default persisted state.
    let state = conductor.get_state().await.unwrap();
    assert_eq!(state, ConductorState::default());
    let cell_id = fake_cell_id(1);
    let installed_cell = InstalledCell::new(cell_id.clone(), "role_id".to_string());
    let app = InstalledAppCommon::new_legacy("fake app", vec![installed_cell]).unwrap();
    // Add an app via update_state, then verify it shows up among the
    // stopped apps with exactly the one cell we installed.
    conductor
        .update_state(|mut state| {
            state.add_app(app)?;
            Ok(state)
        })
        .await
        .unwrap();
    let state = conductor.get_state().await.unwrap();
    assert_eq!(
        state.stopped_apps().map(second).collect::<Vec<_>>()[0]
            .all_cells()
            .collect::<Vec<_>>()
            .as_slice(),
        &[&cell_id]
    );
}
#[tokio::test(flavor = "multi_thread")]
async fn can_add_clone_cell_to_app() {
    let envs = test_environments();
    let keystore = envs.conductor().keystore().clone();
    let holochain_p2p = holochain_p2p::stub_network().await;
    let agent = fixt!(AgentPubKey);
    let dna = fake_valid_dna_file("");
    let cell_id = CellId::new(dna.dna_hash().to_owned(), agent.clone());
    let dna_store = RealDnaStore::new();
    let (post_commit_sender, _post_commit_receiver) =
        tokio::sync::mpsc::channel(POST_COMMIT_CHANNEL_BOUND);
    let conductor = Conductor::new(
        envs.conductor(),
        envs.wasm(),
        dna_store,
        keystore,
        envs.path().to_path_buf().into(),
        holochain_p2p,
        DbSyncLevel::default(),
        post_commit_sender,
    )
    .await
    .unwrap();
    let installed_cell = InstalledCell::new(cell_id.clone(), "role_id".to_string());
    // NOTE(review): the third argument to AppRoleAssignment::new appears to
    // be the clone limit (1 here) — confirm against its definition.
    let role = AppRoleAssignment::new(cell_id.clone(), true, 1);
    // app1 is a legacy app (cloning later fails with limit 0); app2 uses a
    // role assignment that permits one clone.
    let app1 = InstalledAppCommon::new_legacy("no clone", vec![installed_cell.clone()]).unwrap();
    let app2 = InstalledAppCommon::new("yes clone", agent, vec![("role_id".into(), role.clone())]);
    assert_eq!(
        app1.roles().keys().collect::<Vec<_>>(),
        vec![&"role_id".to_string()]
    );
    assert_eq!(
        app2.roles().keys().collect::<Vec<_>>(),
        vec![&"role_id".to_string()]
    );
    conductor.register_phenotype(dna);
    conductor
        .update_state(move |mut state| {
            state
                .installed_apps_mut()
                .insert(RunningApp::from(app1.clone()).into());
            state
                .installed_apps_mut()
                .insert(RunningApp::from(app2.clone()).into());
            Ok(state)
        })
        .await
        .unwrap();
    // Cloning on the legacy app must hit its clone limit of 0.
    matches::assert_matches!(
        conductor
            .add_clone_cell_to_app("no clone".to_string(), "role_id".to_string(), ().into())
            .await,
        Err(ConductorError::AppError(AppError::CloneLimitExceeded(0, _)))
    );
    let cloned_cell_id = conductor
        .add_clone_cell_to_app("yes clone".to_string(), "role_id".to_string(), ().into())
        .await
        .unwrap();
    // The new clone must be recorded on the "yes clone" app in state.
    let state = conductor.get_state().await.unwrap();
    assert_eq!(
        state
            .running_apps()
            .find(|(id, _)| &id[..] == "yes clone")
            .unwrap()
            .1
            .cloned_cells()
            .cloned()
            .collect::<Vec<CellId>>(),
        vec![cloned_cell_id]
    );
}
/// App can't be installed if another app is already installed under the
/// same InstalledAppId
#[tokio::test(flavor = "multi_thread")]
async fn app_ids_are_unique() {
    let environments = test_environments();
    let dna_store = MockDnaStore::new();
    let holochain_p2p = holochain_p2p::stub_network().await;
    let (post_commit_sender, _post_commit_receiver) =
        tokio::sync::mpsc::channel(POST_COMMIT_CHANNEL_BOUND);
    let conductor = Conductor::new(
        environments.conductor(),
        environments.wasm(),
        dna_store,
        environments.keystore().clone(),
        environments.path().to_path_buf().into(),
        holochain_p2p,
        DbSyncLevel::default(),
        post_commit_sender,
    )
    .await
    .unwrap();
    let cell_id = fake_cell_id(1);
    let installed_cell = InstalledCell::new(cell_id.clone(), "handle".to_string());
    let app = InstalledAppCommon::new_legacy("id".to_string(), vec![installed_cell]).unwrap();
    // First installation under id "id" succeeds.
    conductor
        .add_disabled_app_to_db(app.clone().into())
        .await
        .unwrap();
    // Installing again under the same id is rejected.
    assert_matches!(
        conductor.add_disabled_app_to_db(app.clone().into()).await,
        Err(ConductorError::AppAlreadyInstalled(id))
        if id == "id".to_string()
    );
    //- it doesn't matter whether the app is active or inactive
    let (_, delta) = conductor
        .transition_app_status("id".to_string(), AppStatusTransition::Enable)
        .await
        .unwrap();
    assert_eq!(delta, AppStatusFx::SpinUp);
    assert_matches!(
        conductor.add_disabled_app_to_db(app.clone().into()).await,
        Err(ConductorError::AppAlreadyInstalled(id))
        if id == "id".to_string()
    );
}
/// App can't be installed if it contains duplicate AppRoleIds
#[tokio::test(flavor = "multi_thread")]
async fn app_role_ids_are_unique() {
    // Two of the three cells share the role id "1".
    let installed_cells = vec![
        InstalledCell::new(fixt!(CellId), "1".into()),
        InstalledCell::new(fixt!(CellId), "1".into()),
        InstalledCell::new(fixt!(CellId), "2".into()),
    ];
    let result = InstalledAppCommon::new_legacy("id", installed_cells.into_iter());
    // Only the duplicated id "1" is reported back.
    matches::assert_matches!(
        result,
        Err(AppError::DuplicateAppRoleIds(_, role_ids)) if role_ids == vec!["1".to_string()]
    );
}
#[tokio::test(flavor = "multi_thread")]
async fn can_set_fake_state() {
    let envs = test_environments();
    let fake_state = ConductorState::default();
    let conductor = ConductorBuilder::new()
        .fake_state(fake_state.clone())
        .test(&envs, &[])
        .await
        .unwrap();
    // The state injected through the builder must round-trip unchanged.
    let loaded = conductor.get_state_from_handle().await.unwrap();
    assert_eq!(fake_state, loaded);
}
#[tokio::test(flavor = "multi_thread")]
async fn proxy_tls_with_test_keystore() {
    observability::test_run().ok();
    let keystore1 = spawn_test_keystore().await.unwrap();
    let keystore2 = spawn_test_keystore().await.unwrap();
    // Run the proxy round-trip; surface any failure with full debug output.
    match proxy_tls_inner(keystore1.clone(), keystore2.clone()).await {
        Ok(()) => {}
        Err(e) => panic!("{:#?}", e),
    }
    let _ = keystore1.shutdown().await;
    let _ = keystore2.shutdown().await;
}
// Round-trips one message between two proxy-wrapped in-memory transports,
// each using a TLS certificate taken from its keystore, and checks the echo.
async fn proxy_tls_inner(
    keystore1: MetaLairClient,
    keystore2: MetaLairClient,
) -> anyhow::Result<()> {
    use ghost_actor::GhostControlSender;
    use kitsune_p2p::dependencies::*;
    use kitsune_p2p_proxy::*;
    use kitsune_p2p_types::transport::*;
    // Each keystore supplies (or lazily creates) its own TLS certificate.
    let (cert_digest, cert, cert_priv_key) = keystore1.get_or_create_first_tls_cert().await?;
    let tls_config1 = TlsConfig {
        cert,
        cert_priv_key,
        cert_digest,
    };
    let (cert_digest, cert, cert_priv_key) = keystore2.get_or_create_first_tls_cert().await?;
    let tls_config2 = TlsConfig {
        cert,
        cert_priv_key,
        cert_digest,
    };
    // First endpoint: proxy listener over an in-memory transport; its event
    // loop echoes every incoming channel back with an "echo: " prefix.
    let proxy_config =
        ProxyConfig::local_proxy_server(tls_config1, AcceptProxyCallback::reject_all());
    let (bind, evt) = kitsune_p2p_types::transport_mem::spawn_bind_transport_mem().await?;
    let (bind1, mut evt1) = spawn_kitsune_proxy_listener(
        proxy_config,
        kitsune_p2p::dependencies::kitsune_p2p_types::config::KitsuneP2pTuningParams::default(),
        bind,
        evt,
    )
    .await?;
    tokio::task::spawn(async move {
        while let Some(evt) = evt1.next().await {
            match evt {
                TransportEvent::IncomingChannel(_, mut write, read) => {
                    println!("YOOTH");
                    let data = read.read_to_end().await;
                    let data = String::from_utf8_lossy(&data);
                    let data = format!("echo: {}", data);
                    write.write_and_close(data.into_bytes()).await?;
                }
            }
        }
        TransportResult::Ok(())
    });
    let url1 = bind1.bound_url().await?;
    println!("{:?}", url1);
    // Second endpoint: another proxy-wrapped in-memory transport, used as the
    // client side of the channel.
    let proxy_config =
        ProxyConfig::local_proxy_server(tls_config2, AcceptProxyCallback::reject_all());
    let (bind, evt) = kitsune_p2p_types::transport_mem::spawn_bind_transport_mem().await?;
    let (bind2, _evt2) = spawn_kitsune_proxy_listener(
        proxy_config,
        kitsune_p2p::dependencies::kitsune_p2p_types::config::KitsuneP2pTuningParams::default(),
        bind,
        evt,
    )
    .await?;
    println!("{:?}", bind2.bound_url().await?);
    // Send a message through the proxy channel and verify the echoed reply.
    let (_url, mut write, read) = bind2.create_channel(url1).await?;
    write.write_and_close(b"test".to_vec()).await?;
    let data = read.read_to_end().await;
    let data = String::from_utf8_lossy(&data);
    assert_eq!("echo: test", data);
    let _ = bind1.ghost_actor_shutdown_immediate().await;
    let _ = bind2.ghost_actor_shutdown_immediate().await;
    Ok(())
}
#[tokio::test(flavor = "multi_thread")]
async fn test_list_running_apps_for_cell_id() {
    observability::test_run().ok();
    // Helper: a uniquely-namespaced DNA with a single empty inline zome.
    let mk_dna = |name| async move {
        let zome = InlineZome::new_unique(Vec::new());
        SweetDnaFile::unique_from_inline_zome(name, zome)
            .await
            .unwrap()
    };
    // Create three unique DNAs
    let (dna1, _) = mk_dna("zome1").await;
    let (dna2, _) = mk_dna("zome2").await;
    let (dna3, _) = mk_dna("zome3").await;
    // Install two apps on the Conductor:
    // Both share a CellId in common, and also include a distinct CellId each.
    let mut conductor = SweetConductor::from_standard_config().await;
    let alice = SweetAgents::one(conductor.keystore()).await;
    let app1 = conductor
        .setup_app_for_agent("app1", alice.clone(), &[dna1.clone(), dna2])
        .await
        .unwrap();
    let app2 = conductor
        .setup_app_for_agent("app2", alice.clone(), &[dna1, dna3])
        .await
        .unwrap();
    let (cell1, cell2) = app1.into_tuple();
    let (_, cell3) = app2.into_tuple();
    // Helper: the set of running app ids that require the given cell.
    let list_apps = |conductor: ConductorHandle, cell: SweetCell| async move {
        conductor
            .list_running_apps_for_required_cell_id(cell.cell_id())
            .await
            .unwrap()
    };
    // - Ensure that the first CellId is associated with both apps,
    //   and the other two are only associated with one app each.
    assert_eq!(
        list_apps(conductor.clone(), cell1).await,
        hashset!["app1".to_string(), "app2".to_string()]
    );
    assert_eq!(
        list_apps(conductor.clone(), cell2).await,
        hashset!["app1".to_string()]
    );
    assert_eq!(
        list_apps(conductor.clone(), cell3).await,
        hashset!["app2".to_string()]
    );
}
/// Builds a uniquely-namespaced DNA file from a single inline zome.
async fn mk_dna(name: &str, zome: InlineZome) -> DnaResult<(DnaFile, Zome)> {
    let pending = SweetDnaFile::unique_from_inline_zome(name, zome);
    pending.await
}
/// A function that sets up a SweetApp, used in several tests in this module.
/// Installs two DNAs under one app: a hardcoded zome that always works plus
/// the caller-supplied `custom_zome`. Returns the installed app or the
/// genesis error.
async fn common_genesis_test_app(
    conductor: &mut SweetConductor,
    custom_zome: InlineZome,
) -> ConductorApiResult<SweetApp> {
    let hardcoded_zome = InlineZome::new_unique(Vec::new());
    // Just a strong reminder that we need to be careful once we start using existing Cells:
    // When a Cell panics or fails validation in general, we want to disable all Apps touching that Cell.
    // However, if the panic/failure happens during Genesis, we want to completely
    // destroy the app which is attempting to Create that Cell, but *NOT* any other apps
    // which might be touching that Cell.
    //
    // It probably works out to be the same either way, since if we are creating a Cell,
    // no other app could be possibly referencing it, but just in case we have some kind of complex
    // behavior like installing two apps which reference each others' Cells at the same time,
    // we need to be aware of this distinction.
    holochain_types::app::we_must_remember_to_rework_cell_panic_handling_after_implementing_use_existing_cell_resolution(
    );
    // Create one DNA which always works, and another from a zome that gets passed in
    let (dna_hardcoded, _) = mk_dna("hardcoded", hardcoded_zome).await?;
    let (dna_custom, _) = mk_dna("custom", custom_zome).await?;
    // Install both DNAs under the same app:
    conductor
        .setup_app(&"app", &[dna_hardcoded, dna_custom])
        .await
}
#[tokio::test(flavor = "multi_thread")]
async fn test_uninstall_app() {
    observability::test_run().ok();
    let zome = InlineZome::new_unique(Vec::new());
    let mut conductor = SweetConductor::from_standard_config().await;
    // Install the standard two-DNA test app.
    common_genesis_test_app(&mut conductor, zome).await.unwrap();
    // - Ensure that the app is active
    assert_eq_retry_10s!(
        {
            let state = conductor.get_state_from_handle().await.unwrap();
            (state.running_apps().count(), state.stopped_apps().count())
        },
        (1, 0)
    );
    conductor
        .inner_handle()
        .uninstall_app(&"app".to_string())
        .await
        .unwrap();
    // - Ensure that the app is removed
    assert_eq_retry_10s!(
        {
            let state = conductor.get_state_from_handle().await.unwrap();
            (state.running_apps().count(), state.stopped_apps().count())
        },
        (0, 0)
    );
}
#[tokio::test(flavor = "multi_thread")]
async fn test_reconciliation_idempotency() {
    observability::test_run().ok();
    let zome = InlineZome::new_unique(Vec::new());
    let mut conductor = SweetConductor::from_standard_config().await;
    common_genesis_test_app(&mut conductor, zome).await.unwrap();
    // Reconciling twice must leave the conductor in the same state as once.
    for _ in 0..2 {
        conductor
            .inner_handle()
            .reconcile_cell_status_with_app_status()
            .await
            .unwrap();
    }
    // - Ensure that the app is active
    assert_eq_retry_10s!(conductor.list_running_apps().await.unwrap().len(), 1);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_signing_error_during_genesis() {
    observability::test_run().ok();
    // Every keystore call fails, so genesis cannot sign anything.
    let bad_keystore = spawn_crude_mock_keystore(|| LairError::other("test error"))
        .await
        .unwrap();
    let envs = test_envs_with_keystore(bad_keystore);
    let config = ConductorConfig::default();
    let mut conductor = SweetConductor::new(
        SweetConductor::handle_from_existing(&envs, &config, &[]).await,
        envs,
        config,
    )
    .await;
    let (dna, _) = SweetDnaFile::unique_from_test_wasms(vec![TestWasm::Sign])
        .await
        .unwrap();
    let result = conductor
        .setup_app_for_agents(&"app", &[fixt!(AgentPubKey)], &[dna])
        .await;
    // - Assert that we got an error during Genesis. However, this test is
    //   pretty useless. What we really want is to ensure that the system is
    //   resilient when this type of error comes up in a real setting.
    let err = match result {
        Err(err) => err,
        Ok(_) => panic!("this should have been an error"),
    };
    match err {
        ConductorApiError::ConductorError(inner) => {
            assert_matches!(*inner, ConductorError::GenesisFailed { errors } if errors.len() == 1)
        }
        _ => panic!("this should have been an error too"),
    }
}
/// Issues a `sign_ephemeral` zome call against `cell` over the app websocket
/// and returns the raw response.
async fn make_signing_call(client: &mut WebsocketSender, cell: &SweetCell) -> AppResponse {
    let call = ZomeCall {
        cell_id: cell.cell_id().clone(),
        zome_name: "sign".into(),
        fn_name: "sign_ephemeral".into(),
        payload: ExternIO::encode(()).unwrap(),
        cap_secret: None,
        provenance: cell.agent_pubkey().clone(),
    };
    client
        .request(AppRequest::ZomeCall(Box::new(call)))
        .await
        .unwrap()
}
/// A test which simulates Keystore errors with a test keystore which is designed
/// to fail.
///
/// This test was written making the assumption that we could swap out the
/// MetaLairClient for each Cell at runtime, but given our current concurrency
/// model which puts each Cell in an Arc, this is not possible.
/// In order to implement this test, we should probably have the "crude mock
/// keystore" listen on a channel which toggles its behavior from always-correct
/// to always-failing. However, the problem that this test is testing for does
/// not seem to be an issue, therefore I'm not putting the effort into fixing it
/// right now.
#[tokio::test(flavor = "multi_thread")]
#[ignore = "we need a better mock keystore in order to implement this test"]
#[allow(unreachable_code, unused_variables, unused_mut)]
async fn test_signing_error_during_genesis_doesnt_bork_interfaces() {
    observability::test_run().ok();
    let good_keystore = spawn_test_keystore().await.unwrap();
    let bad_keystore = spawn_crude_mock_keystore(|| LairError::other("test error"))
        .await
        .unwrap();
    let envs = test_envs_with_keystore(good_keystore.clone());
    let config = standard_config();
    let mut conductor = SweetConductor::new(
        SweetConductor::handle_from_existing(&envs, &config, &[]).await,
        envs,
        config,
    )
    .await;
    let (agent1, agent2, agent3) = SweetAgents::three(good_keystore.clone()).await;
    let (dna, _) = SweetDnaFile::unique_from_test_wasms(vec![TestWasm::Sign])
        .await
        .unwrap();
    // Two apps installed successfully while the good keystore is in effect.
    let app1 = conductor
        .setup_app_for_agent("app1", agent1.clone(), &[dna.clone()])
        .await
        .unwrap();
    let app2 = conductor
        .setup_app_for_agent("app2", agent2.clone(), &[dna.clone()])
        .await
        .unwrap();
    let (cell1,) = app1.into_tuple();
    let (cell2,) = app2.into_tuple();
    let app_port = conductor.inner_handle().add_app_interface(0).await.unwrap();
    let (mut app_client, _) = websocket_client_by_port(app_port).await.unwrap();
    let (mut admin_client, _) = conductor.admin_ws_client().await;
    // Now use the bad keystore to cause a signing error on the next zome call
    todo!("switch keystore to always-erroring mode");
    // Installing a third app must fail while signing is broken...
    let response: AdminResponse = admin_client
        .request(AdminRequest::InstallApp(Box::new(InstallAppPayload {
            installed_app_id: "app3".into(),
            agent_key: agent3.clone(),
            dnas: vec![InstallAppDnaPayload {
                role_id: "whatever".into(),
                hash: dna.dna_hash().clone(),
                membrane_proof: None,
            }],
        })))
        .await
        .unwrap();
    // TODO: match the errors more tightly
    assert_matches!(response, AdminResponse::Error(_));
    // ...and so must a zome call over the app interface.
    let response = make_signing_call(&mut app_client, &cell2).await;
    assert_matches!(response, AppResponse::Error(_));
    // Go back to the good keystore, see if we can proceed
    todo!("switch keystore to always-correct mode");
    let response = make_signing_call(&mut app_client, &cell2).await;
    assert_matches!(response, AppResponse::ZomeCall(_));
    let response = make_signing_call(&mut app_client, &cell1).await;
    assert_matches!(response, AppResponse::ZomeCall(_));
    // conductor
    //     .setup_app_for_agent("app3", agent3, &[dna.clone()])
    //     .await
    //     .unwrap();
}
/// Build an inline zome exposing a single "create" callback which commits one
/// app entry of the "unit" entry def and returns its header hash.
pub(crate) fn simple_create_entry_zome() -> InlineZome {
    let entry_def = EntryDef::default_with_id("unit");
    let zome = InlineZome::new_unique(vec![entry_def.clone()]);
    zome.callback("create", move |api, ()| {
        // The closure captures the entry def so it can name the def id at call time.
        let def_id: EntryDefId = entry_def.id.clone();
        let payload = Entry::app(().try_into().unwrap()).unwrap();
        let input = CreateInput::new(def_id, payload, ChainTopOrdering::default());
        Ok(api.create(input)?)
    })
}
/// Disable a running app, verify the list_apps filters reflect the change,
/// then re-enable it and confirm zome calls work again afterward.
#[tokio::test(flavor = "multi_thread")]
async fn test_reenable_app() {
    observability::test_run().ok();
    let zome = simple_create_entry_zome();
    let mut conductor = SweetConductor::from_standard_config().await;
    let app = common_genesis_test_app(&mut conductor, zome).await.unwrap();
    let all_apps = conductor.list_apps(None).await.unwrap();
    assert_eq!(all_apps.len(), 1);
    // Initially: one enabled app (with 2 cells), nothing disabled.
    let inactive_apps = conductor
        .list_apps(Some(AppStatusFilter::Disabled))
        .await
        .unwrap();
    let active_apps = conductor
        .list_apps(Some(AppStatusFilter::Enabled))
        .await
        .unwrap();
    assert_eq!(inactive_apps.len(), 0);
    assert_eq!(active_apps.len(), 1);
    assert_eq!(active_apps[0].cell_data.len(), 2);
    assert_matches!(active_apps[0].status, InstalledAppInfoStatus::Running);
    // Disable the app at the user's request.
    conductor
        .disable_app("app".to_string(), DisabledAppReason::User)
        .await
        .unwrap();
    // After disabling: the filters flip, and the disabled app keeps its cell data.
    let inactive_apps = conductor
        .list_apps(Some(AppStatusFilter::Disabled))
        .await
        .unwrap();
    let active_apps = conductor
        .list_apps(Some(AppStatusFilter::Enabled))
        .await
        .unwrap();
    assert_eq!(active_apps.len(), 0);
    assert_eq!(inactive_apps.len(), 1);
    assert_eq!(inactive_apps[0].cell_data.len(), 2);
    assert_matches!(
        inactive_apps[0].status,
        InstalledAppInfoStatus::Disabled {
            reason: DisabledAppReason::User
        }
    );
    // Re-enable and force cell status reconciliation.
    conductor.enable_app("app".to_string()).await.unwrap();
    conductor
        .inner_handle()
        .reconcile_cell_status_with_app_status()
        .await
        .unwrap();
    let (_, cell) = app.into_tuple();
    // - We can still make a zome call after reactivation
    let _: HeaderHash = conductor
        .call_fallible(&cell.zome("custom"), "create", ())
        .await
        .unwrap();
    // - Ensure that the app is active
    assert_eq_retry_10s!(conductor.list_running_apps().await.unwrap().len(), 1);
    let inactive_apps = conductor
        .list_apps(Some(AppStatusFilter::Disabled))
        .await
        .unwrap();
    let active_apps = conductor
        .list_apps(Some(AppStatusFilter::Enabled))
        .await
        .unwrap();
    assert_eq!(active_apps.len(), 1);
    assert_eq!(inactive_apps.len(), 0);
}
/// Verify that an app whose validation callback panics during genesis ends up
/// stopped (0 enabled, 1 stopped). Ignored — see the attribute string below.
#[tokio::test(flavor = "multi_thread")]
#[ignore = "Causing a tokio thread to panic is problematic.
This is supposed to emulate a panic in a wasm validation callback, but it's not the same.
However, when wasm panics, it returns an error anyway, so the other similar test
which tests for validation errors should be sufficient."]
async fn test_cells_disable_on_validation_panic() {
    observability::test_run().ok();
    // A zome whose only validation callback always panics.
    let bad_zome =
        InlineZome::new_unique(Vec::new()).callback("validate", |_api, _data: ValidateData| {
            panic!("intentional panic during validation");
            #[allow(unreachable_code)]
            Ok(ValidateResult::Valid)
        });
    let mut conductor = SweetConductor::from_standard_config().await;
    // This may be an error, depending on if validation runs before or after
    // the app is enabled. Proceed in either case.
    let _ = common_genesis_test_app(&mut conductor, bad_zome).await;
    // - Ensure that the app was disabled because one Cell panicked during validation
    //   (while publishing genesis elements)
    assert_eq_retry_10s!(
        {
            let state = conductor.get_state_from_handle().await.unwrap();
            (state.enabled_apps().count(), state.stopped_apps().count())
        },
        (0, 1)
    );
}
/// Verify that app installation fails with `ConductorError::GenesisFailed`
/// (carrying exactly one error) when a zome's `genesis_self_check` callback
/// reports an Invalid result.
#[tokio::test(flavor = "multi_thread")]
async fn test_installation_fails_if_genesis_self_check_is_invalid() {
    observability::test_run().ok();
    // A zome whose genesis self-check always reports Invalid.
    let bad_zome = InlineZome::new_unique(Vec::new()).callback(
        "genesis_self_check",
        |_api, _data: GenesisSelfCheckData| {
            Ok(GenesisSelfCheckResult::Invalid(
                "intentional invalid result for testing".into(),
            ))
        },
    );
    let mut conductor = SweetConductor::from_standard_config().await;
    // Installation must fail; extract the error or panic.
    let err = match common_genesis_test_app(&mut conductor, bad_zome).await {
        Err(err) => err,
        Ok(_) => panic!("this should have been an error"),
    };
    // The error must be a GenesisFailed conductor error with exactly one cause.
    match err {
        ConductorApiError::ConductorError(inner) => {
            assert_matches!(*inner, ConductorError::GenesisFailed { errors } if errors.len() == 1);
        }
        _ => panic!("this should have been an error too"),
    }
}
/// Verify that a failing `validate_create_entry` after genesis merely makes
/// the zome call return an error, without stopping or disabling the app.
#[tokio::test(flavor = "multi_thread")]
async fn test_bad_entry_validation_after_genesis_returns_zome_call_error() {
    observability::test_run().ok();
    let unit_entry_def = EntryDef::default_with_id("unit");
    // Zome whose entry-creation validation always reports Invalid, plus a
    // "create" callback to trigger that validation on demand.
    let bad_zome = InlineZome::new_unique(vec![unit_entry_def.clone()])
        .callback("validate_create_entry", |_api, _data: ValidateData| {
            Ok(ValidateResult::Invalid(
                "intentional invalid result for testing".into(),
            ))
        })
        .callback("create", move |api, ()| {
            let entry_def_id: EntryDefId = unit_entry_def.id.clone();
            let entry = Entry::app(().try_into().unwrap()).unwrap();
            let hash = api.create(CreateInput::new(
                entry_def_id,
                entry,
                ChainTopOrdering::default(),
            ))?;
            Ok(hash)
        });
    let mut conductor = SweetConductor::from_standard_config().await;
    // Genesis itself succeeds; the bad validation only fires on "create".
    let app = common_genesis_test_app(&mut conductor, bad_zome)
        .await
        .unwrap();
    let (_, cell_bad) = app.into_tuple();
    let result: ConductorApiResult<HeaderHash> = conductor
        .call_fallible(&cell_bad.zome("custom"), "create", ())
        .await;
    // - The failed validation simply causes the zome call to return an error
    assert_matches!(result, Err(_));
    // - The app is not disabled
    assert_eq_retry_10s!(
        {
            let state = conductor.get_state_from_handle().await.unwrap();
            (state.running_apps().count(), state.stopped_apps().count())
        },
        (1, 0)
    );
}
// TODO: we need a test with a failure during a validation callback that happens
// *inline*. It's not enough to have a failing validate_create_entry for
// instance, because that failure will be returned by the zome call.
//
// NB: currently the pre-genesis and post-genesis handling of panics is the same.
// If we implement [ B-04188 ], then this test will be made more possible.
// Otherwise, we have to devise a way to discover whether a panic happened
// during genesis or not.
/// Intended to check that an app is stopped when a post-genesis validation
/// failure occurs (ignored until the approach above is figured out).
#[tokio::test(flavor = "multi_thread")]
#[ignore = "need to figure out how to write this test"]
async fn test_apps_disable_on_panic_after_genesis() {
    observability::test_run().ok();
    let unit_entry_def = EntryDef::default_with_id("unit");
    let bad_zome = InlineZome::new_unique(vec![unit_entry_def.clone()])
        // We need a different validation callback that doesn't happen inline
        // so we can cause failure in it. But it must also be after genesis.
        .callback("validate_create_entry", |_api, _data: ValidateData| {
            // Trigger a deserialization error
            let _: Entry = SerializedBytes::try_from(())?.try_into()?;
            Ok(ValidateResult::Valid)
        })
        .callback("create", move |api, ()| {
            let entry_def_id: EntryDefId = unit_entry_def.id.clone();
            let entry = Entry::app(().try_into().unwrap()).unwrap();
            let hash = api.create(CreateInput::new(
                entry_def_id,
                entry,
                ChainTopOrdering::default(),
            ))?;
            Ok(hash)
        });
    let mut conductor = SweetConductor::from_standard_config().await;
    let app = common_genesis_test_app(&mut conductor, bad_zome)
        .await
        .unwrap();
    let (_, cell_bad) = app.into_tuple();
    // Trigger the failing validation; the call's result is irrelevant here.
    let _: ConductorApiResult<HeaderHash> = conductor
        .call_fallible(&cell_bad.zome("custom"), "create", ())
        .await;
    // Expectation: 0 running apps, 1 stopped app after the failure.
    assert_eq_retry_10s!(
        {
            let state = conductor.get_state_from_handle().await.unwrap();
            (state.running_apps().count(), state.stopped_apps().count())
        },
        (0, 1)
    );
}
/// Walk the app status state machine through every pause/start/disable/enable
/// transition and assert the status observed via `list_apps` at each step.
#[tokio::test(flavor = "multi_thread")]
async fn test_app_status_states() {
    observability::test_run().ok();
    let zome = simple_create_entry_zome();
    let mut conductor = SweetConductor::from_standard_config().await;
    common_genesis_test_app(&mut conductor, zome).await.unwrap();
    let all_apps = conductor.list_apps(None).await.unwrap();
    assert_eq!(all_apps.len(), 1);
    // Helper: fetch the single installed app's current status.
    let get_status = || async { conductor.list_apps(None).await.unwrap()[0].status.clone() };
    // RUNNING -pause-> PAUSED
    conductor
        .pause_app("app".to_string(), PausedAppReason::Error("because".into()))
        .await
        .unwrap();
    assert_matches!(get_status().await, InstalledAppInfoStatus::Paused { .. });
    // PAUSED --start-> RUNNING
    conductor.start_app("app".to_string()).await.unwrap();
    assert_matches!(get_status().await, InstalledAppInfoStatus::Running);
    // RUNNING --disable-> DISABLED
    conductor
        .disable_app("app".to_string(), DisabledAppReason::User)
        .await
        .unwrap();
    assert_matches!(get_status().await, InstalledAppInfoStatus::Disabled { .. });
    // DISABLED --start-> DISABLED
    conductor.start_app("app".to_string()).await.unwrap();
    assert_matches!(get_status().await, InstalledAppInfoStatus::Disabled { .. });
    // DISABLED --pause-> DISABLED
    conductor
        .pause_app("app".to_string(), PausedAppReason::Error("because".into()))
        .await
        .unwrap();
    assert_matches!(get_status().await, InstalledAppInfoStatus::Disabled { .. });
    // DISABLED --enable-> ENABLED
    conductor.enable_app("app".to_string()).await.unwrap();
    assert_matches!(get_status().await, InstalledAppInfoStatus::Running);
    // RUNNING --pause-> PAUSED
    conductor
        .pause_app("app".to_string(), PausedAppReason::Error("because".into()))
        .await
        .unwrap();
    assert_matches!(get_status().await, InstalledAppInfoStatus::Paused { .. });
    // PAUSED --enable-> RUNNING
    conductor.enable_app("app".to_string()).await.unwrap();
    assert_matches!(get_status().await, InstalledAppInfoStatus::Running);
}
/// Placeholder for a multi-app variant of `test_app_status_states`; blocked on
/// cell-sharing across apps (see the ignore reason).
#[tokio::test(flavor = "multi_thread")]
#[ignore = "we don't have the ability to share cells across apps yet, but will need a test for that once we do"]
async fn test_app_status_states_multi_app() {
    todo!("write a test similar to the previous one, testing various state transitions, including switching on and off individual Cells");
}
/// Exercise reconciliation between cell status (Joined/PendingJoin/removed)
/// and app status, asserting the (app status, joined count, pending count)
/// triple after each simulated event.
#[tokio::test(flavor = "multi_thread")]
async fn test_cell_and_app_status_reconciliation() {
    observability::test_run().ok();
    use AppStatusFx::*;
    use AppStatusKind::*;
    use CellStatus::*;
    let mk_zome = || InlineZome::new_unique(Vec::new());
    // Three distinct DNAs so the app has three independent cells.
    let dnas = [
        mk_dna("zome", mk_zome()).await.unwrap().0,
        mk_dna("zome", mk_zome()).await.unwrap().0,
        mk_dna("zome", mk_zome()).await.unwrap().0,
    ];
    let app_id = "app".to_string();
    let mut conductor = SweetConductor::from_standard_config().await;
    conductor.setup_app(&app_id, &dnas).await.unwrap();
    let cell_ids = conductor.list_cell_ids(None);
    // Slice containing just the first cell, used to perturb its status.
    let cell1 = &cell_ids[0..1];
    // Helper: snapshot (app status kind, #joined cells, #pending-join cells).
    let check = || async {
        (
            AppStatusKind::from(AppStatus::from(
                conductor.list_apps(None).await.unwrap()[0].status.clone(),
            )),
            conductor.list_cell_ids(Some(Joined)).len(),
            conductor.list_cell_ids(Some(PendingJoin)).len(),
        )
    };
    assert_eq!(check().await, (Running, 3, 0));
    // - Simulate a cell failing to join the network
    conductor.update_cell_status(cell1, PendingJoin);
    assert_eq!(check().await, (Running, 2, 1));
    // - Reconciled app state is Paused due to one unjoined Cell
    let delta = conductor
        .reconcile_app_status_with_cell_status(None)
        .await
        .unwrap();
    assert_eq!(delta, SpinDown);
    assert_eq!(check().await, (Paused, 2, 1));
    // - Can start the app again and get all cells joined
    conductor.start_app(app_id.clone()).await.unwrap();
    assert_eq!(check().await, (Running, 3, 0));
    // - Simulate a cell being removed due to error
    conductor.remove_cells(cell1).await;
    assert_eq!(check().await, (Running, 2, 0));
    // - Again, app state should be reconciled to Paused due to missing cell
    let delta = conductor
        .reconcile_app_status_with_cell_status(None)
        .await
        .unwrap();
    assert_eq!(delta, SpinDown);
    assert_eq!(check().await, (Paused, 2, 0));
    // - Disabling the app causes all cells to be removed
    conductor
        .disable_app(app_id.clone(), DisabledAppReason::User)
        .await
        .unwrap();
    assert_eq!(check().await, (Disabled, 0, 0));
    // - Starting a disabled app does nothing
    conductor.start_app(app_id.clone()).await.unwrap();
    assert_eq!(check().await, (Disabled, 0, 0));
    // - ...but enabling one does
    conductor.enable_app(app_id).await.unwrap();
    assert_eq!(check().await, (Running, 3, 0));
}
/// Install three apps in Running/Paused/Disabled states and verify each
/// `AppStatusFilter` returns the right count, both before and after a
/// conductor restart (which moves paused apps back to Running).
#[tokio::test(flavor = "multi_thread")]
async fn test_app_status_filters() {
    observability::test_run().ok();
    let zome = InlineZome::new_unique(Vec::new());
    let dnas = [mk_dna("dna", zome).await.unwrap().0];
    let mut conductor = SweetConductor::from_standard_config().await;
    // App ids name the state each app will be put into.
    conductor.setup_app("running", &dnas).await.unwrap();
    conductor.setup_app("paused", &dnas).await.unwrap();
    conductor.setup_app("disabled", &dnas).await.unwrap();
    // put apps in the proper states for testing
    conductor
        .pause_app(
            "paused".to_string(),
            PausedAppReason::Error("because".into()),
        )
        .await
        .unwrap();
    conductor
        .disable_app("disabled".to_string(), DisabledAppReason::User)
        .await
        .unwrap();
    // Local shorthand: list apps under a given filter (macro, since the call
    // captures `conductor` and uses `.await`).
    macro_rules! list_apps {
        ($filter: expr) => {
            conductor.list_apps($filter).await.unwrap()
        };
    }
    // Check the counts returned by each filter
    use AppStatusFilter::*;
    assert_eq!(list_apps!(None).len(), 3);
    assert_eq!(
        (
            list_apps!(Some(Running)).len(),
            list_apps!(Some(Stopped)).len(),
            list_apps!(Some(Enabled)).len(),
            list_apps!(Some(Disabled)).len(),
            list_apps!(Some(Paused)).len(),
        ),
        (1, 2, 2, 1, 1,)
    );
    // check that paused apps move to Running state on conductor restart
    conductor.shutdown().await;
    conductor.startup().await;
    assert_eq!(list_apps!(None).len(), 3);
    assert_eq!(
        (
            list_apps!(Some(Running)).len(),
            list_apps!(Some(Stopped)).len(),
            list_apps!(Some(Enabled)).len(),
            list_apps!(Some(Disabled)).len(),
            list_apps!(Some(Paused)).len(),
        ),
        (2, 1, 2, 1, 0,)
    );
}
/// Check that the init() callback is only ever called once, even under many
/// concurrent initial zome function calls.
///
/// 100 zome calls are spawned concurrently; afterwards the shared atomic
/// counters must show 100 iterations, 100 zome-function executions, and
/// exactly 1 init() execution.
#[tokio::test(flavor = "multi_thread")]
async fn test_init_concurrency() {
    observability::test_run().ok();
    // Shared counters, read back at the end to verify call counts.
    let num_inits = Arc::new(AtomicU32::new(0));
    let num_calls = Arc::new(AtomicU32::new(0));
    let num_inits_clone = num_inits.clone();
    let num_calls_clone = num_calls.clone();
    let zome = InlineZome::new_unique(vec![])
        .callback("init", move |_, ()| {
            // `fetch_add` takes `&self`; no per-call Arc clone is needed.
            num_inits.fetch_add(1, Ordering::SeqCst);
            Ok(InitCallbackResult::Pass)
        })
        .callback("zomefunc", move |_, ()| {
            // Sleep briefly to widen the window in which concurrent calls
            // could race a second init().
            std::thread::sleep(std::time::Duration::from_millis(5));
            num_calls.fetch_add(1, Ordering::SeqCst);
            Ok(())
        });
    let dnas = [mk_dna("zome", zome).await.unwrap().0];
    let mut conductor = SweetConductor::from_standard_config().await;
    let app = conductor.setup_app("app", &dnas).await.unwrap();
    let (cell,) = app.into_tuple();
    let conductor = Arc::new(conductor);
    // Perform 100 concurrent zome calls
    let num_iters = Arc::new(AtomicU32::new(0));
    let call_tasks = (0..100u32).map(|_i| {
        let conductor = conductor.clone();
        let zome = cell.zome("zome");
        let num_iters = num_iters.clone();
        tokio::spawn(async move {
            println!("i: {:?}", _i);
            num_iters.fetch_add(1, Ordering::SeqCst);
            let _: () = conductor.call(&zome, "zomefunc", ()).await;
        })
    });
    let _ = futures::future::join_all(call_tasks).await;
    // `load` expresses a pure read more clearly than `fetch_add(0, ..)`.
    assert_eq!(num_iters.load(Ordering::SeqCst), 100);
    assert_eq!(num_calls_clone.load(Ordering::SeqCst), 100);
    assert_eq!(num_inits_clone.load(Ordering::SeqCst), 1);
}
| 34.615036 | 138 | 0.632186 |
1e3bde444c9b50aa485181856620724295c2a49c | 44,445 | #[doc = "Register `CTRL` reader"]
// NOTE(review): the code below matches the svd2rust generator's register
// reader/writer pattern — presumably auto-generated; confirm before hand-editing.
// `R` and `W` are thin newtype wrappers around the generic register proxies,
// forwarding all access via Deref/DerefMut.
pub struct R(crate::R<CTRL_SPEC>);
impl core::ops::Deref for R {
    type Target = crate::R<CTRL_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl From<crate::R<CTRL_SPEC>> for R {
    #[inline(always)]
    fn from(reader: crate::R<CTRL_SPEC>) -> Self {
        R(reader)
    }
}
#[doc = "Register `CTRL` writer"]
pub struct W(crate::W<CTRL_SPEC>);
impl core::ops::Deref for W {
    type Target = crate::W<CTRL_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl core::ops::DerefMut for W {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl From<crate::W<CTRL_SPEC>> for W {
    #[inline(always)]
    fn from(writer: crate::W<CTRL_SPEC>) -> Self {
        W(writer)
    }
}
// Single-bit field accessors: each `X_R` wraps the decoded bit for reads and
// each `X_W` read-modify-writes that bit into `W.bits` (mask, then OR).
// SYNC occupies bit 0 of CTRL.
#[doc = "Field `SYNC` reader - USART Synchronous Mode"]
pub struct SYNC_R(crate::FieldReader<bool, bool>);
impl SYNC_R {
    pub(crate) fn new(bits: bool) -> Self {
        SYNC_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for SYNC_R {
    type Target = crate::FieldReader<bool, bool>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `SYNC` writer - USART Synchronous Mode"]
pub struct SYNC_W<'a> {
    w: &'a mut W,
}
impl<'a> SYNC_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01);
        self.w
    }
}
// LOOPBK occupies bit 1 of CTRL.
#[doc = "Field `LOOPBK` reader - Loopback Enable"]
pub struct LOOPBK_R(crate::FieldReader<bool, bool>);
impl LOOPBK_R {
    pub(crate) fn new(bits: bool) -> Self {
        LOOPBK_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for LOOPBK_R {
    type Target = crate::FieldReader<bool, bool>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `LOOPBK` writer - Loopback Enable"]
pub struct LOOPBK_W<'a> {
    w: &'a mut W,
}
impl<'a> LOOPBK_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1);
        self.w
    }
}
// CCEN occupies bit 2 of CTRL.
#[doc = "Field `CCEN` reader - Collision Check Enable"]
pub struct CCEN_R(crate::FieldReader<bool, bool>);
impl CCEN_R {
    pub(crate) fn new(bits: bool) -> Self {
        CCEN_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for CCEN_R {
    type Target = crate::FieldReader<bool, bool>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `CCEN` writer - Collision Check Enable"]
pub struct CCEN_W<'a> {
    w: &'a mut W,
}
impl<'a> CCEN_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2);
        self.w
    }
}
// MPM occupies bit 3 of CTRL.
#[doc = "Field `MPM` reader - Multi-Processor Mode"]
pub struct MPM_R(crate::FieldReader<bool, bool>);
impl MPM_R {
    pub(crate) fn new(bits: bool) -> Self {
        MPM_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for MPM_R {
    type Target = crate::FieldReader<bool, bool>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `MPM` writer - Multi-Processor Mode"]
pub struct MPM_W<'a> {
    w: &'a mut W,
}
impl<'a> MPM_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 3)) | ((value as u32 & 0x01) << 3);
        self.w
    }
}
// MPAB occupies bit 4 of CTRL.
#[doc = "Field `MPAB` reader - Multi-Processor Address-Bit"]
pub struct MPAB_R(crate::FieldReader<bool, bool>);
impl MPAB_R {
    pub(crate) fn new(bits: bool) -> Self {
        MPAB_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for MPAB_R {
    type Target = crate::FieldReader<bool, bool>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `MPAB` writer - Multi-Processor Address-Bit"]
pub struct MPAB_W<'a> {
    w: &'a mut W,
}
impl<'a> MPAB_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 4)) | ((value as u32 & 0x01) << 4);
        self.w
    }
}
// OVS is a 2-bit enumerated field at bits 6:5 of CTRL (mask 0x03 << 5).
#[doc = "Oversampling\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum OVS_A {
    #[doc = "0: Regular UART mode with 16X oversampling in asynchronous mode"]
    X16 = 0,
    #[doc = "1: Double speed with 8X oversampling in asynchronous mode"]
    X8 = 1,
    #[doc = "2: 6X oversampling in asynchronous mode"]
    X6 = 2,
    #[doc = "3: Quadruple speed with 4X oversampling in asynchronous mode"]
    X4 = 3,
}
impl From<OVS_A> for u8 {
    #[inline(always)]
    fn from(variant: OVS_A) -> Self {
        variant as _
    }
}
#[doc = "Field `OVS` reader - Oversampling"]
pub struct OVS_R(crate::FieldReader<u8, OVS_A>);
impl OVS_R {
    pub(crate) fn new(bits: u8) -> Self {
        OVS_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> OVS_A {
        match self.bits {
            0 => OVS_A::X16,
            1 => OVS_A::X8,
            2 => OVS_A::X6,
            3 => OVS_A::X4,
            // A 2-bit field can only hold 0..=3, so this arm is impossible.
            _ => unreachable!(),
        }
    }
    #[doc = "Checks if the value of the field is `X16`"]
    #[inline(always)]
    pub fn is_x16(&self) -> bool {
        **self == OVS_A::X16
    }
    #[doc = "Checks if the value of the field is `X8`"]
    #[inline(always)]
    pub fn is_x8(&self) -> bool {
        **self == OVS_A::X8
    }
    #[doc = "Checks if the value of the field is `X6`"]
    #[inline(always)]
    pub fn is_x6(&self) -> bool {
        **self == OVS_A::X6
    }
    #[doc = "Checks if the value of the field is `X4`"]
    #[inline(always)]
    pub fn is_x4(&self) -> bool {
        **self == OVS_A::X4
    }
}
impl core::ops::Deref for OVS_R {
    type Target = crate::FieldReader<u8, OVS_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `OVS` writer - Oversampling"]
pub struct OVS_W<'a> {
    w: &'a mut W,
}
impl<'a> OVS_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: OVS_A) -> &'a mut W {
        self.bits(variant.into())
    }
    #[doc = "Regular UART mode with 16X oversampling in asynchronous mode"]
    #[inline(always)]
    pub fn x16(self) -> &'a mut W {
        self.variant(OVS_A::X16)
    }
    #[doc = "Double speed with 8X oversampling in asynchronous mode"]
    #[inline(always)]
    pub fn x8(self) -> &'a mut W {
        self.variant(OVS_A::X8)
    }
    #[doc = "6X oversampling in asynchronous mode"]
    #[inline(always)]
    pub fn x6(self) -> &'a mut W {
        self.variant(OVS_A::X6)
    }
    #[doc = "Quadruple speed with 4X oversampling in asynchronous mode"]
    #[inline(always)]
    pub fn x4(self) -> &'a mut W {
        self.variant(OVS_A::X4)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x03 << 5)) | ((value as u32 & 0x03) << 5);
        self.w
    }
}
// Further single-bit fields of CTRL; same reader/writer pattern as above.
// CLKPOL occupies bit 8 of CTRL.
#[doc = "Field `CLKPOL` reader - Clock Polarity"]
pub struct CLKPOL_R(crate::FieldReader<bool, bool>);
impl CLKPOL_R {
    pub(crate) fn new(bits: bool) -> Self {
        CLKPOL_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for CLKPOL_R {
    type Target = crate::FieldReader<bool, bool>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `CLKPOL` writer - Clock Polarity"]
pub struct CLKPOL_W<'a> {
    w: &'a mut W,
}
impl<'a> CLKPOL_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 8)) | ((value as u32 & 0x01) << 8);
        self.w
    }
}
// CLKPHA occupies bit 9 of CTRL.
#[doc = "Field `CLKPHA` reader - Clock Edge For Setup/Sample"]
pub struct CLKPHA_R(crate::FieldReader<bool, bool>);
impl CLKPHA_R {
    pub(crate) fn new(bits: bool) -> Self {
        CLKPHA_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for CLKPHA_R {
    type Target = crate::FieldReader<bool, bool>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `CLKPHA` writer - Clock Edge For Setup/Sample"]
pub struct CLKPHA_W<'a> {
    w: &'a mut W,
}
impl<'a> CLKPHA_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 9)) | ((value as u32 & 0x01) << 9);
        self.w
    }
}
// MSBF occupies bit 10 of CTRL.
#[doc = "Field `MSBF` reader - Most Significant Bit First"]
pub struct MSBF_R(crate::FieldReader<bool, bool>);
impl MSBF_R {
    pub(crate) fn new(bits: bool) -> Self {
        MSBF_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for MSBF_R {
    type Target = crate::FieldReader<bool, bool>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `MSBF` writer - Most Significant Bit First"]
pub struct MSBF_W<'a> {
    w: &'a mut W,
}
impl<'a> MSBF_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 10)) | ((value as u32 & 0x01) << 10);
        self.w
    }
}
// CSMA occupies bit 11 of CTRL.
#[doc = "Field `CSMA` reader - Action On Slave-Select In Master Mode"]
pub struct CSMA_R(crate::FieldReader<bool, bool>);
impl CSMA_R {
    pub(crate) fn new(bits: bool) -> Self {
        CSMA_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for CSMA_R {
    type Target = crate::FieldReader<bool, bool>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `CSMA` writer - Action On Slave-Select In Master Mode"]
pub struct CSMA_W<'a> {
    w: &'a mut W,
}
impl<'a> CSMA_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 11)) | ((value as u32 & 0x01) << 11);
        self.w
    }
}
// TXBIL occupies bit 12 of CTRL.
#[doc = "Field `TXBIL` reader - TX Buffer Interrupt Level"]
pub struct TXBIL_R(crate::FieldReader<bool, bool>);
impl TXBIL_R {
    pub(crate) fn new(bits: bool) -> Self {
        TXBIL_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for TXBIL_R {
    type Target = crate::FieldReader<bool, bool>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `TXBIL` writer - TX Buffer Interrupt Level"]
pub struct TXBIL_W<'a> {
    w: &'a mut W,
}
impl<'a> TXBIL_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 12)) | ((value as u32 & 0x01) << 12);
        self.w
    }
}
// RXINV occupies bit 13 of CTRL.
#[doc = "Field `RXINV` reader - Receiver Input Invert"]
pub struct RXINV_R(crate::FieldReader<bool, bool>);
impl RXINV_R {
    pub(crate) fn new(bits: bool) -> Self {
        RXINV_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for RXINV_R {
    type Target = crate::FieldReader<bool, bool>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `RXINV` writer - Receiver Input Invert"]
pub struct RXINV_W<'a> {
    w: &'a mut W,
}
impl<'a> RXINV_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 13)) | ((value as u32 & 0x01) << 13);
        self.w
    }
}
// TXINV occupies bit 14 of CTRL.
#[doc = "Field `TXINV` reader - Transmitter output Invert"]
pub struct TXINV_R(crate::FieldReader<bool, bool>);
impl TXINV_R {
    pub(crate) fn new(bits: bool) -> Self {
        TXINV_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for TXINV_R {
    type Target = crate::FieldReader<bool, bool>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `TXINV` writer - Transmitter output Invert"]
pub struct TXINV_W<'a> {
    w: &'a mut W,
}
impl<'a> TXINV_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 14)) | ((value as u32 & 0x01) << 14);
        self.w
    }
}
// CSINV occupies bit 15 of CTRL.
#[doc = "Field `CSINV` reader - Chip Select Invert"]
pub struct CSINV_R(crate::FieldReader<bool, bool>);
impl CSINV_R {
    pub(crate) fn new(bits: bool) -> Self {
        CSINV_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for CSINV_R {
    type Target = crate::FieldReader<bool, bool>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `CSINV` writer - Chip Select Invert"]
pub struct CSINV_W<'a> {
    w: &'a mut W,
}
impl<'a> CSINV_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 15)) | ((value as u32 & 0x01) << 15);
        self.w
    }
}
// AUTOCS occupies bit 16 of CTRL.
#[doc = "Field `AUTOCS` reader - Automatic Chip Select"]
pub struct AUTOCS_R(crate::FieldReader<bool, bool>);
impl AUTOCS_R {
    pub(crate) fn new(bits: bool) -> Self {
        AUTOCS_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for AUTOCS_R {
    type Target = crate::FieldReader<bool, bool>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `AUTOCS` writer - Automatic Chip Select"]
pub struct AUTOCS_W<'a> {
    w: &'a mut W,
}
impl<'a> AUTOCS_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 16)) | ((value as u32 & 0x01) << 16);
        self.w
    }
}
// AUTOTRI occupies bit 17 of CTRL.
#[doc = "Field `AUTOTRI` reader - Automatic TX Tristate"]
pub struct AUTOTRI_R(crate::FieldReader<bool, bool>);
impl AUTOTRI_R {
    pub(crate) fn new(bits: bool) -> Self {
        AUTOTRI_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for AUTOTRI_R {
    type Target = crate::FieldReader<bool, bool>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `AUTOTRI` writer - Automatic TX Tristate"]
pub struct AUTOTRI_W<'a> {
    w: &'a mut W,
}
impl<'a> AUTOTRI_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 17)) | ((value as u32 & 0x01) << 17);
        self.w
    }
}
#[doc = "Field `SCMODE` reader - SmartCard Mode"]
pub struct SCMODE_R(crate::FieldReader<bool, bool>);
impl SCMODE_R {
pub(crate) fn new(bits: bool) -> Self {
SCMODE_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for SCMODE_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `SCMODE` writer - SmartCard Mode"]
pub struct SCMODE_W<'a> {
w: &'a mut W,
}
impl<'a> SCMODE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 18)) | ((value as u32 & 0x01) << 18);
self.w
}
}
#[doc = "Field `SCRETRANS` reader - SmartCard Retransmit"]
pub struct SCRETRANS_R(crate::FieldReader<bool, bool>);
impl SCRETRANS_R {
pub(crate) fn new(bits: bool) -> Self {
SCRETRANS_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for SCRETRANS_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `SCRETRANS` writer - SmartCard Retransmit"]
pub struct SCRETRANS_W<'a> {
w: &'a mut W,
}
impl<'a> SCRETRANS_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 19)) | ((value as u32 & 0x01) << 19);
self.w
}
}
#[doc = "Field `SKIPPERRF` reader - Skip Parity Error Frames"]
pub struct SKIPPERRF_R(crate::FieldReader<bool, bool>);
impl SKIPPERRF_R {
pub(crate) fn new(bits: bool) -> Self {
SKIPPERRF_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for SKIPPERRF_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `SKIPPERRF` writer - Skip Parity Error Frames"]
pub struct SKIPPERRF_W<'a> {
w: &'a mut W,
}
impl<'a> SKIPPERRF_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 20)) | ((value as u32 & 0x01) << 20);
self.w
}
}
#[doc = "Field `BIT8DV` reader - Bit 8 Default Value"]
pub struct BIT8DV_R(crate::FieldReader<bool, bool>);
impl BIT8DV_R {
pub(crate) fn new(bits: bool) -> Self {
BIT8DV_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for BIT8DV_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `BIT8DV` writer - Bit 8 Default Value"]
pub struct BIT8DV_W<'a> {
w: &'a mut W,
}
impl<'a> BIT8DV_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 21)) | ((value as u32 & 0x01) << 21);
self.w
}
}
#[doc = "Field `ERRSDMA` reader - Halt DMA On Error"]
pub struct ERRSDMA_R(crate::FieldReader<bool, bool>);
impl ERRSDMA_R {
pub(crate) fn new(bits: bool) -> Self {
ERRSDMA_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for ERRSDMA_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `ERRSDMA` writer - Halt DMA On Error"]
pub struct ERRSDMA_W<'a> {
w: &'a mut W,
}
impl<'a> ERRSDMA_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 22)) | ((value as u32 & 0x01) << 22);
self.w
}
}
#[doc = "Field `ERRSRX` reader - Disable RX On Error"]
pub struct ERRSRX_R(crate::FieldReader<bool, bool>);
impl ERRSRX_R {
pub(crate) fn new(bits: bool) -> Self {
ERRSRX_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for ERRSRX_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `ERRSRX` writer - Disable RX On Error"]
pub struct ERRSRX_W<'a> {
w: &'a mut W,
}
impl<'a> ERRSRX_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 23)) | ((value as u32 & 0x01) << 23);
self.w
}
}
#[doc = "Field `ERRSTX` reader - Disable TX On Error"]
pub struct ERRSTX_R(crate::FieldReader<bool, bool>);
impl ERRSTX_R {
pub(crate) fn new(bits: bool) -> Self {
ERRSTX_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for ERRSTX_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `ERRSTX` writer - Disable TX On Error"]
pub struct ERRSTX_W<'a> {
w: &'a mut W,
}
impl<'a> ERRSTX_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 24)) | ((value as u32 & 0x01) << 24);
self.w
}
}
#[doc = "Field `SSSEARLY` reader - Synchronous Slave Setup Early"]
pub struct SSSEARLY_R(crate::FieldReader<bool, bool>);
impl SSSEARLY_R {
pub(crate) fn new(bits: bool) -> Self {
SSSEARLY_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for SSSEARLY_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `SSSEARLY` writer - Synchronous Slave Setup Early"]
pub struct SSSEARLY_W<'a> {
w: &'a mut W,
}
impl<'a> SSSEARLY_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 25)) | ((value as u32 & 0x01) << 25);
self.w
}
}
#[doc = "TX Delay Transmission\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum TXDELAY_A {
#[doc = "0: Frames are transmitted immediately"]
NONE = 0,
#[doc = "1: Transmission of new frames are delayed by a single baud period"]
SINGLE = 1,
#[doc = "2: Transmission of new frames are delayed by two baud periods"]
DOUBLE = 2,
#[doc = "3: Transmission of new frames are delayed by three baud periods"]
TRIPLE = 3,
}
impl From<TXDELAY_A> for u8 {
#[inline(always)]
fn from(variant: TXDELAY_A) -> Self {
variant as _
}
}
#[doc = "Field `TXDELAY` reader - TX Delay Transmission"]
pub struct TXDELAY_R(crate::FieldReader<u8, TXDELAY_A>);
impl TXDELAY_R {
pub(crate) fn new(bits: u8) -> Self {
TXDELAY_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> TXDELAY_A {
match self.bits {
0 => TXDELAY_A::NONE,
1 => TXDELAY_A::SINGLE,
2 => TXDELAY_A::DOUBLE,
3 => TXDELAY_A::TRIPLE,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `NONE`"]
#[inline(always)]
pub fn is_none(&self) -> bool {
**self == TXDELAY_A::NONE
}
#[doc = "Checks if the value of the field is `SINGLE`"]
#[inline(always)]
pub fn is_single(&self) -> bool {
**self == TXDELAY_A::SINGLE
}
#[doc = "Checks if the value of the field is `DOUBLE`"]
#[inline(always)]
pub fn is_double(&self) -> bool {
**self == TXDELAY_A::DOUBLE
}
#[doc = "Checks if the value of the field is `TRIPLE`"]
#[inline(always)]
pub fn is_triple(&self) -> bool {
**self == TXDELAY_A::TRIPLE
}
}
impl core::ops::Deref for TXDELAY_R {
type Target = crate::FieldReader<u8, TXDELAY_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `TXDELAY` writer - TX Delay Transmission"]
pub struct TXDELAY_W<'a> {
w: &'a mut W,
}
impl<'a> TXDELAY_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: TXDELAY_A) -> &'a mut W {
self.bits(variant.into())
}
#[doc = "Frames are transmitted immediately"]
#[inline(always)]
pub fn none(self) -> &'a mut W {
self.variant(TXDELAY_A::NONE)
}
#[doc = "Transmission of new frames are delayed by a single baud period"]
#[inline(always)]
pub fn single(self) -> &'a mut W {
self.variant(TXDELAY_A::SINGLE)
}
#[doc = "Transmission of new frames are delayed by two baud periods"]
#[inline(always)]
pub fn double(self) -> &'a mut W {
self.variant(TXDELAY_A::DOUBLE)
}
#[doc = "Transmission of new frames are delayed by three baud periods"]
#[inline(always)]
pub fn triple(self) -> &'a mut W {
self.variant(TXDELAY_A::TRIPLE)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 26)) | ((value as u32 & 0x03) << 26);
self.w
}
}
#[doc = "Field `BYTESWAP` reader - Byteswap In Double Accesses"]
pub struct BYTESWAP_R(crate::FieldReader<bool, bool>);
impl BYTESWAP_R {
pub(crate) fn new(bits: bool) -> Self {
BYTESWAP_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for BYTESWAP_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `BYTESWAP` writer - Byteswap In Double Accesses"]
pub struct BYTESWAP_W<'a> {
w: &'a mut W,
}
impl<'a> BYTESWAP_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 28)) | ((value as u32 & 0x01) << 28);
self.w
}
}
#[doc = "Field `AUTOTX` reader - Always Transmit When RX Not Full"]
pub struct AUTOTX_R(crate::FieldReader<bool, bool>);
impl AUTOTX_R {
pub(crate) fn new(bits: bool) -> Self {
AUTOTX_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for AUTOTX_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `AUTOTX` writer - Always Transmit When RX Not Full"]
pub struct AUTOTX_W<'a> {
w: &'a mut W,
}
impl<'a> AUTOTX_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 29)) | ((value as u32 & 0x01) << 29);
self.w
}
}
#[doc = "Field `MVDIS` reader - Majority Vote Disable"]
pub struct MVDIS_R(crate::FieldReader<bool, bool>);
impl MVDIS_R {
pub(crate) fn new(bits: bool) -> Self {
MVDIS_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for MVDIS_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `MVDIS` writer - Majority Vote Disable"]
pub struct MVDIS_W<'a> {
w: &'a mut W,
}
impl<'a> MVDIS_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 30)) | ((value as u32 & 0x01) << 30);
self.w
}
}
#[doc = "Field `SMSDELAY` reader - Synchronous Master Sample Delay"]
pub struct SMSDELAY_R(crate::FieldReader<bool, bool>);
impl SMSDELAY_R {
pub(crate) fn new(bits: bool) -> Self {
SMSDELAY_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for SMSDELAY_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `SMSDELAY` writer - Synchronous Master Sample Delay"]
pub struct SMSDELAY_W<'a> {
w: &'a mut W,
}
impl<'a> SMSDELAY_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 31)) | ((value as u32 & 0x01) << 31);
self.w
}
}
impl R {
#[doc = "Bit 0 - USART Synchronous Mode"]
#[inline(always)]
pub fn sync(&self) -> SYNC_R {
SYNC_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 1 - Loopback Enable"]
#[inline(always)]
pub fn loopbk(&self) -> LOOPBK_R {
LOOPBK_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 2 - Collision Check Enable"]
#[inline(always)]
pub fn ccen(&self) -> CCEN_R {
CCEN_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 3 - Multi-Processor Mode"]
#[inline(always)]
pub fn mpm(&self) -> MPM_R {
MPM_R::new(((self.bits >> 3) & 0x01) != 0)
}
#[doc = "Bit 4 - Multi-Processor Address-Bit"]
#[inline(always)]
pub fn mpab(&self) -> MPAB_R {
MPAB_R::new(((self.bits >> 4) & 0x01) != 0)
}
#[doc = "Bits 5:6 - Oversampling"]
#[inline(always)]
pub fn ovs(&self) -> OVS_R {
OVS_R::new(((self.bits >> 5) & 0x03) as u8)
}
#[doc = "Bit 8 - Clock Polarity"]
#[inline(always)]
pub fn clkpol(&self) -> CLKPOL_R {
CLKPOL_R::new(((self.bits >> 8) & 0x01) != 0)
}
#[doc = "Bit 9 - Clock Edge For Setup/Sample"]
#[inline(always)]
pub fn clkpha(&self) -> CLKPHA_R {
CLKPHA_R::new(((self.bits >> 9) & 0x01) != 0)
}
#[doc = "Bit 10 - Most Significant Bit First"]
#[inline(always)]
pub fn msbf(&self) -> MSBF_R {
MSBF_R::new(((self.bits >> 10) & 0x01) != 0)
}
#[doc = "Bit 11 - Action On Slave-Select In Master Mode"]
#[inline(always)]
pub fn csma(&self) -> CSMA_R {
CSMA_R::new(((self.bits >> 11) & 0x01) != 0)
}
#[doc = "Bit 12 - TX Buffer Interrupt Level"]
#[inline(always)]
pub fn txbil(&self) -> TXBIL_R {
TXBIL_R::new(((self.bits >> 12) & 0x01) != 0)
}
#[doc = "Bit 13 - Receiver Input Invert"]
#[inline(always)]
pub fn rxinv(&self) -> RXINV_R {
RXINV_R::new(((self.bits >> 13) & 0x01) != 0)
}
#[doc = "Bit 14 - Transmitter output Invert"]
#[inline(always)]
pub fn txinv(&self) -> TXINV_R {
TXINV_R::new(((self.bits >> 14) & 0x01) != 0)
}
#[doc = "Bit 15 - Chip Select Invert"]
#[inline(always)]
pub fn csinv(&self) -> CSINV_R {
CSINV_R::new(((self.bits >> 15) & 0x01) != 0)
}
#[doc = "Bit 16 - Automatic Chip Select"]
#[inline(always)]
pub fn autocs(&self) -> AUTOCS_R {
AUTOCS_R::new(((self.bits >> 16) & 0x01) != 0)
}
#[doc = "Bit 17 - Automatic TX Tristate"]
#[inline(always)]
pub fn autotri(&self) -> AUTOTRI_R {
AUTOTRI_R::new(((self.bits >> 17) & 0x01) != 0)
}
#[doc = "Bit 18 - SmartCard Mode"]
#[inline(always)]
pub fn scmode(&self) -> SCMODE_R {
SCMODE_R::new(((self.bits >> 18) & 0x01) != 0)
}
#[doc = "Bit 19 - SmartCard Retransmit"]
#[inline(always)]
pub fn scretrans(&self) -> SCRETRANS_R {
SCRETRANS_R::new(((self.bits >> 19) & 0x01) != 0)
}
#[doc = "Bit 20 - Skip Parity Error Frames"]
#[inline(always)]
pub fn skipperrf(&self) -> SKIPPERRF_R {
SKIPPERRF_R::new(((self.bits >> 20) & 0x01) != 0)
}
#[doc = "Bit 21 - Bit 8 Default Value"]
#[inline(always)]
pub fn bit8dv(&self) -> BIT8DV_R {
BIT8DV_R::new(((self.bits >> 21) & 0x01) != 0)
}
#[doc = "Bit 22 - Halt DMA On Error"]
#[inline(always)]
pub fn errsdma(&self) -> ERRSDMA_R {
ERRSDMA_R::new(((self.bits >> 22) & 0x01) != 0)
}
#[doc = "Bit 23 - Disable RX On Error"]
#[inline(always)]
pub fn errsrx(&self) -> ERRSRX_R {
ERRSRX_R::new(((self.bits >> 23) & 0x01) != 0)
}
#[doc = "Bit 24 - Disable TX On Error"]
#[inline(always)]
pub fn errstx(&self) -> ERRSTX_R {
ERRSTX_R::new(((self.bits >> 24) & 0x01) != 0)
}
#[doc = "Bit 25 - Synchronous Slave Setup Early"]
#[inline(always)]
pub fn sssearly(&self) -> SSSEARLY_R {
SSSEARLY_R::new(((self.bits >> 25) & 0x01) != 0)
}
#[doc = "Bits 26:27 - TX Delay Transmission"]
#[inline(always)]
pub fn txdelay(&self) -> TXDELAY_R {
TXDELAY_R::new(((self.bits >> 26) & 0x03) as u8)
}
#[doc = "Bit 28 - Byteswap In Double Accesses"]
#[inline(always)]
pub fn byteswap(&self) -> BYTESWAP_R {
BYTESWAP_R::new(((self.bits >> 28) & 0x01) != 0)
}
#[doc = "Bit 29 - Always Transmit When RX Not Full"]
#[inline(always)]
pub fn autotx(&self) -> AUTOTX_R {
AUTOTX_R::new(((self.bits >> 29) & 0x01) != 0)
}
#[doc = "Bit 30 - Majority Vote Disable"]
#[inline(always)]
pub fn mvdis(&self) -> MVDIS_R {
MVDIS_R::new(((self.bits >> 30) & 0x01) != 0)
}
#[doc = "Bit 31 - Synchronous Master Sample Delay"]
#[inline(always)]
pub fn smsdelay(&self) -> SMSDELAY_R {
SMSDELAY_R::new(((self.bits >> 31) & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - USART Synchronous Mode"]
#[inline(always)]
pub fn sync(&mut self) -> SYNC_W {
SYNC_W { w: self }
}
#[doc = "Bit 1 - Loopback Enable"]
#[inline(always)]
pub fn loopbk(&mut self) -> LOOPBK_W {
LOOPBK_W { w: self }
}
#[doc = "Bit 2 - Collision Check Enable"]
#[inline(always)]
pub fn ccen(&mut self) -> CCEN_W {
CCEN_W { w: self }
}
#[doc = "Bit 3 - Multi-Processor Mode"]
#[inline(always)]
pub fn mpm(&mut self) -> MPM_W {
MPM_W { w: self }
}
#[doc = "Bit 4 - Multi-Processor Address-Bit"]
#[inline(always)]
pub fn mpab(&mut self) -> MPAB_W {
MPAB_W { w: self }
}
#[doc = "Bits 5:6 - Oversampling"]
#[inline(always)]
pub fn ovs(&mut self) -> OVS_W {
OVS_W { w: self }
}
#[doc = "Bit 8 - Clock Polarity"]
#[inline(always)]
pub fn clkpol(&mut self) -> CLKPOL_W {
CLKPOL_W { w: self }
}
#[doc = "Bit 9 - Clock Edge For Setup/Sample"]
#[inline(always)]
pub fn clkpha(&mut self) -> CLKPHA_W {
CLKPHA_W { w: self }
}
#[doc = "Bit 10 - Most Significant Bit First"]
#[inline(always)]
pub fn msbf(&mut self) -> MSBF_W {
MSBF_W { w: self }
}
#[doc = "Bit 11 - Action On Slave-Select In Master Mode"]
#[inline(always)]
pub fn csma(&mut self) -> CSMA_W {
CSMA_W { w: self }
}
#[doc = "Bit 12 - TX Buffer Interrupt Level"]
#[inline(always)]
pub fn txbil(&mut self) -> TXBIL_W {
TXBIL_W { w: self }
}
#[doc = "Bit 13 - Receiver Input Invert"]
#[inline(always)]
pub fn rxinv(&mut self) -> RXINV_W {
RXINV_W { w: self }
}
#[doc = "Bit 14 - Transmitter output Invert"]
#[inline(always)]
pub fn txinv(&mut self) -> TXINV_W {
TXINV_W { w: self }
}
#[doc = "Bit 15 - Chip Select Invert"]
#[inline(always)]
pub fn csinv(&mut self) -> CSINV_W {
CSINV_W { w: self }
}
#[doc = "Bit 16 - Automatic Chip Select"]
#[inline(always)]
pub fn autocs(&mut self) -> AUTOCS_W {
AUTOCS_W { w: self }
}
#[doc = "Bit 17 - Automatic TX Tristate"]
#[inline(always)]
pub fn autotri(&mut self) -> AUTOTRI_W {
AUTOTRI_W { w: self }
}
#[doc = "Bit 18 - SmartCard Mode"]
#[inline(always)]
pub fn scmode(&mut self) -> SCMODE_W {
SCMODE_W { w: self }
}
#[doc = "Bit 19 - SmartCard Retransmit"]
#[inline(always)]
pub fn scretrans(&mut self) -> SCRETRANS_W {
SCRETRANS_W { w: self }
}
#[doc = "Bit 20 - Skip Parity Error Frames"]
#[inline(always)]
pub fn skipperrf(&mut self) -> SKIPPERRF_W {
SKIPPERRF_W { w: self }
}
#[doc = "Bit 21 - Bit 8 Default Value"]
#[inline(always)]
pub fn bit8dv(&mut self) -> BIT8DV_W {
BIT8DV_W { w: self }
}
#[doc = "Bit 22 - Halt DMA On Error"]
#[inline(always)]
pub fn errsdma(&mut self) -> ERRSDMA_W {
ERRSDMA_W { w: self }
}
#[doc = "Bit 23 - Disable RX On Error"]
#[inline(always)]
pub fn errsrx(&mut self) -> ERRSRX_W {
ERRSRX_W { w: self }
}
#[doc = "Bit 24 - Disable TX On Error"]
#[inline(always)]
pub fn errstx(&mut self) -> ERRSTX_W {
ERRSTX_W { w: self }
}
#[doc = "Bit 25 - Synchronous Slave Setup Early"]
#[inline(always)]
pub fn sssearly(&mut self) -> SSSEARLY_W {
SSSEARLY_W { w: self }
}
#[doc = "Bits 26:27 - TX Delay Transmission"]
#[inline(always)]
pub fn txdelay(&mut self) -> TXDELAY_W {
TXDELAY_W { w: self }
}
#[doc = "Bit 28 - Byteswap In Double Accesses"]
#[inline(always)]
pub fn byteswap(&mut self) -> BYTESWAP_W {
BYTESWAP_W { w: self }
}
#[doc = "Bit 29 - Always Transmit When RX Not Full"]
#[inline(always)]
pub fn autotx(&mut self) -> AUTOTX_W {
AUTOTX_W { w: self }
}
#[doc = "Bit 30 - Majority Vote Disable"]
#[inline(always)]
pub fn mvdis(&mut self) -> MVDIS_W {
MVDIS_W { w: self }
}
#[doc = "Bit 31 - Synchronous Master Sample Delay"]
#[inline(always)]
pub fn smsdelay(&mut self) -> SMSDELAY_W {
SMSDELAY_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Control Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [ctrl](index.html) module"]
pub struct CTRL_SPEC;
impl crate::RegisterSpec for CTRL_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [ctrl::R](R) reader structure"]
impl crate::Readable for CTRL_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [ctrl::W](W) writer structure"]
impl crate::Writable for CTRL_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets CTRL to value 0"]
impl crate::Resettable for CTRL_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
| 29.030046 | 401 | 0.556936 |
111f44e7b87791d0ab356017c61b1180148d6ded | 2,617 | // Copyright 2018 foundationdb-rs developers, https://github.com/Clikengo/foundationdb-rs/graphs/contributors
// Copyright 2013-2018 Apple, Inc and the FoundationDB project authors.
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! Error types for the Fdb crate
use std;
use std::ffi::CStr;
use std::fmt;
use crate::options;
use foundationdb_sys as fdb_sys;
pub(crate) fn eval(error_code: fdb_sys::fdb_error_t) -> FdbResult<()> {
let rust_code: i32 = error_code;
if rust_code == 0 {
Ok(())
} else {
Err(FdbError::from_code(error_code))
}
}
/// The Standard Error type of FoundationDB
#[derive(Debug, Copy, Clone)]
pub struct FdbError {
/// The FoundationDB error code
error_code: i32,
}
impl FdbError {
/// Converts from a raw foundationDB error code
pub fn from_code(error_code: fdb_sys::fdb_error_t) -> Self {
Self { error_code }
}
pub fn message(self) -> &'static str {
let error_str =
unsafe { CStr::from_ptr::<'static>(fdb_sys::fdb_get_error(self.error_code)) };
error_str
.to_str()
.expect("bad error string from FoundationDB")
}
fn is_error_predicate(self, predicate: options::ErrorPredicate) -> bool {
let check =
unsafe { fdb_sys::fdb_error_predicate(predicate.code() as i32, self.error_code) };
check != 0
}
/// Indicates the transaction may have succeeded, though not in a way the system can verify.
pub fn is_maybe_committed(self) -> bool {
self.is_error_predicate(options::ErrorPredicate::MaybeCommitted)
}
/// Indicates the operations in the transactions should be retried because of transient error.
pub fn is_retryable(self) -> bool {
self.is_error_predicate(options::ErrorPredicate::Retryable)
}
/// Indicates the transaction has not committed, though in a way that can be retried.
pub fn is_retryable_not_committed(self) -> bool {
self.is_error_predicate(options::ErrorPredicate::RetryableNotCommitted)
}
/// Raw foundationdb error code
pub fn code(self) -> i32 {
self.error_code
}
}
impl fmt::Display for FdbError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.message().fmt(f)
}
}
/// Alias for `Result<..., FdbError>`
pub type FdbResult<T> = Result<T, FdbError>;
| 31.154762 | 109 | 0.669087 |
4b6807f47e19e22c91d8b4f42bb0416b852aeb9e | 4,818 | use std::cell::RefCell;
use std::time::{Duration, Instant};
use ::sysinfo::*;
const CPU_REFRESH_INTERVAL: Duration = Duration::from_millis(200);
lazy_static! {
static ref PID: usize = sysinfo::get_current_pid().expect("Failed to get process ID") as usize;
}
thread_local! {
static SYSTEM: RefCell<System> = RefCell::new(System::default());
}
macro_rules! system {
(mut $sys:ident, $code:block) => {
SYSTEM.with(|sys| {
let mut $sys = sys.borrow_mut();
$code
})
};
(mut $sys:ident, $code:expr) => {
SYSTEM.with(|sys| {
let mut $sys = sys.borrow_mut();
$code
})
};
($sys:ident, $code:block) => {
SYSTEM.with(|sys| {
let $sys = sys.borrow_mut();
$code
})
};
($sys:ident, $code:expr) => {
SYSTEM.with(|sys| {
let $sys = sys.borrow_mut();
$code
})
};
}
macro_rules! process {
($sys:ident) => {{
$sys.refresh_process(*PID as _);
$sys.process(*PID as _).expect("Failed to get process information")
}};
}
lazy_static! {
static ref LOGICAL_CPUS: u16 = system!(sys, sys.processors().len()) as u16;
}
pub fn system_cpu_usage() -> f64 {
fn update_cpu_usage(cpu_usage: &mut f64, sys: &mut System) {
sys.refresh_cpu();
*cpu_usage = sys.global_processor_info().cpu_usage() as f64;
}
thread_local! {
static TOTAL_CPU_USAGE: RefCell<(f64, Instant)> = RefCell::new(({
let mut cpu_usage: f64 = 0.;
system!(mut sys, update_cpu_usage(&mut cpu_usage, &mut *sys));
cpu_usage
}, Instant::now()));
}
TOTAL_CPU_USAGE.with(|cell| {
let (ref mut cpu_usage, ref mut timestamp) = *cell.borrow_mut();
if timestamp.elapsed() > CPU_REFRESH_INTERVAL {
system!(mut sys, update_cpu_usage(cpu_usage, &mut *sys));
}
*cpu_usage
})
}
pub fn system_memory_usage() -> f64 {
system!(mut sys, {
sys.refresh_memory();
sys.used_memory() as f64 / 1024.
})
}
pub fn system_total_memory() -> f64 {
lazy_static! {
static ref TOTAL_MEMORY: f64 = system!(mut sys, {
sys.refresh_memory();
sys.total_memory() as f64 / 1024.
}) as f64;
}
*TOTAL_MEMORY
}
pub fn system_available_memory() -> f64 {
system!(mut sys, {
sys.refresh_memory();
sys.available_memory() as f64 / 1024.
})
}
pub fn process_cpu_usage() -> f64 {
fn update_cpu_usage(cpu_usage: &mut f64, process: &Process) {
*cpu_usage = (process.cpu_usage() as f64) / (*LOGICAL_CPUS as f64);
}
thread_local! {
static PROCESS_CPU_USAGE: RefCell<(f64, Instant)> = RefCell::new(({
lazy_static::initialize(&LOGICAL_CPUS);
let mut cpu_usage: f64 = 0.;
system!(mut sys, {
let process = process!(sys);
update_cpu_usage(&mut cpu_usage, process);
});
cpu_usage
}, Instant::now()));
}
PROCESS_CPU_USAGE.with(|cell| {
let (ref mut cpu_usage, ref mut timestamp) = *cell.borrow_mut();
if timestamp.elapsed() > CPU_REFRESH_INTERVAL {
system!(mut sys, {
let process = process!(sys);
update_cpu_usage(cpu_usage, process);
});
}
*cpu_usage
})
}
pub fn process_memory_usage() -> f64 {
system!(mut sys, {
sys.refresh_memory();
let process = process!(sys);
process.memory() as f64 / 1024.
})
}
pub fn logical_cpus() -> u16 {
*LOGICAL_CPUS
}
pub fn physical_cpus() -> u16 {
lazy_static! {
static ref PHYSICAL_CPUS: u16 = system!(mut sys, {
sys.refresh_cpu();
sys.physical_core_count().unwrap_or(0)
}) as u16;
}
*PHYSICAL_CPUS
}
pub fn all() -> (AllSystem, AllProcess) {
(all_system(), all_process())
}
#[derive(Copy, Clone, Debug)]
pub struct AllSystem {
pub cpu_usage: f64,
pub memory_usage: f64,
pub total_memory: f64,
pub available_memory: f64,
pub logical_cpus: u16,
pub physical_cpus: u16,
}
pub fn all_system() -> AllSystem {
AllSystem {
cpu_usage: system_cpu_usage(),
memory_usage: system_memory_usage(),
total_memory: system_total_memory(),
available_memory: system_available_memory(),
logical_cpus: logical_cpus(),
physical_cpus: physical_cpus(),
}
}
#[derive(Copy, Clone, Debug)]
pub struct AllProcess {
pub cpu_usage: f64,
pub memory_usage: f64,
}
pub fn all_process() -> AllProcess {
AllProcess {
cpu_usage: process_cpu_usage(),
memory_usage: process_memory_usage(),
}
}
#[derive(Copy, Clone, Debug)]
pub struct RealtimeData {
pub system_cpu_usage: f64,
pub system_memory_usage: f64,
pub system_available_memory: f64,
pub process_cpu_usage: f64,
pub process_memory_usage: f64,
}
pub fn realtime() -> RealtimeData {
RealtimeData {
system_cpu_usage: system_cpu_usage(),
system_memory_usage: system_memory_usage(),
system_available_memory: system_available_memory(),
process_cpu_usage: process_cpu_usage(),
process_memory_usage: process_memory_usage(),
}
} | 23.163462 | 97 | 0.647156 |
fc006fe971ea57edb5e2321f7041cddb8e7bf100 | 1,527 | #[doc = "Register `RXDATA` reader"]
pub struct R(crate::R<RXDATA_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<RXDATA_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<RXDATA_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<RXDATA_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Field `RXDATA` reader - RX Data"]
pub struct RXDATA_R(crate::FieldReader<u8, u8>);
impl RXDATA_R {
pub(crate) fn new(bits: u8) -> Self {
RXDATA_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for RXDATA_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl R {
#[doc = "Bits 0:7 - RX Data"]
#[inline(always)]
pub fn rxdata(&self) -> RXDATA_R {
RXDATA_R::new((self.bits & 0xff) as u8)
}
}
#[doc = "Receive Buffer Data Register\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [rxdata](index.html) module"]
pub struct RXDATA_SPEC;
impl crate::RegisterSpec for RXDATA_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [rxdata::R](R) reader structure"]
impl crate::Readable for RXDATA_SPEC {
type Reader = R;
}
#[doc = "`reset()` method sets RXDATA to value 0"]
impl crate::Resettable for RXDATA_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
| 28.811321 | 237 | 0.616896 |
fc1bc0872f94558f396f5ede783b0480efa2c552 | 116,855 | use indy_api_types::{CommandHandle, ErrorCode, PoolHandle, WalletHandle};
use indy_api_types::errors::prelude::*;
use indy_api_types::validation::Validatable;
use indy_utils::ctypes;
use libc::c_char;
use serde_json;
use crate::commands::{Command, CommandExecutor};
use crate::commands::ledger::LedgerCommand;
use crate::domain::anoncreds::credential_definition::{CredentialDefinition, CredentialDefinitionId};
use crate::domain::anoncreds::revocation_registry_definition::{RevocationRegistryDefinition, RevocationRegistryId};
use crate::domain::anoncreds::revocation_registry_delta::RevocationRegistryDelta;
use crate::domain::anoncreds::schema::{Schema, SchemaId};
use crate::domain::crypto::did::DidValue;
use crate::domain::ledger::auth_rule::{AuthRules, Constraint};
use crate::domain::ledger::author_agreement::{AcceptanceMechanisms, GetTxnAuthorAgreementData};
use crate::domain::ledger::node::NodeOperationData;
use crate::domain::ledger::pool::Schedule;
/// Signs and submits request message to validator pool.
///
/// Adds submitter information to passed request json, signs it with submitter
/// sign key (see wallet_sign), and sends signed request message
/// to validator pool (see write_request).
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// pool_handle: pool handle (created by open_pool_ledger).
/// wallet_handle: wallet handle (created by open_wallet).
/// submitter_did: Id of Identity stored in secured Wallet.
/// request_json: Request data json.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
/// Wallet*
/// Ledger*
/// Crypto*
#[no_mangle]
pub extern fn indy_sign_and_submit_request(command_handle: CommandHandle,
                                           pool_handle: PoolHandle,
                                           wallet_handle: WalletHandle,
                                           submitter_did: *const c_char,
                                           request_json: *const c_char,
                                           cb: Option<extern fn(command_handle_: CommandHandle,
                                                                err: ErrorCode,
                                                                request_result_json: *const c_char)>) -> ErrorCode {
    trace!("indy_sign_and_submit_request: >>> pool_handle: {:?}, wallet_handle: {:?}, submitter_did: {:?}, request_json: {:?}",
           pool_handle, wallet_handle, submitter_did, request_json);
    // Convert and validate the raw C inputs up front; each macro returns the
    // given ErrorCode immediately on a null pointer / invalid UTF-8 / failed validation.
    // NOTE(review): the ErrorCode indices (3..5) lag the actual argument
    // positions (4..6) by one — kept as-is since changing them would alter the
    // error codes callers observe; confirm against the published FFI contract.
    check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam3, DidValue);
    check_useful_c_str!(request_json, ErrorCode::CommonInvalidParam4);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam5);
    trace!("indy_sign_and_submit_request: entities >>> pool_handle: {:?}, wallet_handle: {:?}, submitter_did: {:?}, request_json: {:?}",
           pool_handle, wallet_handle, submitter_did, request_json);
    // Hand the work to the command executor; `cb` is invoked asynchronously
    // with the ledger response once the request completes.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::SignAndSubmitRequest(
            pool_handle,
            wallet_handle,
            submitter_did,
            request_json,
            boxed_callback_string!("indy_sign_and_submit_request", cb, command_handle)
        )));
    // Only the enqueueing outcome is returned synchronously.
    let res = prepare_result!(result);
    trace!("indy_sign_and_submit_request: <<< res: {:?}", res);
    res
}
/// Publishes request message to validator pool (no signing, unlike sign_and_submit_request).
///
/// The request is sent to the validator pool as is. It's assumed that it's already prepared.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// pool_handle: pool handle (created by open_pool_ledger).
/// request_json: Request data json.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
/// Ledger*
#[no_mangle]
pub extern fn indy_submit_request(command_handle: CommandHandle,
                                  pool_handle: PoolHandle,
                                  request_json: *const c_char,
                                  cb: Option<extern fn(command_handle_: CommandHandle,
                                                       err: ErrorCode,
                                                       request_result_json: *const c_char)>) -> ErrorCode {
    trace!("indy_submit_request: >>> pool_handle: {:?}, request_json: {:?}", pool_handle, request_json);
    // Validate/convert the raw C inputs; each macro bails out with its ErrorCode on failure.
    check_useful_c_str!(request_json, ErrorCode::CommonInvalidParam3);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam4);
    trace!("indy_submit_request: entities >>> pool_handle: {:?}, request_json: {:?}", pool_handle, request_json);
    // The request is forwarded as-is (no signing); `cb` receives the pool response asynchronously.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::SubmitRequest(
            pool_handle,
            request_json,
            boxed_callback_string!("indy_submit_request", cb, command_handle)
        )));
    let res = prepare_result!(result);
    trace!("indy_submit_request: <<< res: {:?}", res);
    res
}
/// Send action to particular nodes of validator pool.
///
/// The list of requests can be send:
/// POOL_RESTART
/// GET_VALIDATOR_INFO
///
/// The request is sent to the nodes as is. It's assumed that it's already prepared.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// pool_handle: pool handle (created by open_pool_ledger).
/// request_json: Request data json.
/// nodes: (Optional) List of node names to send the request.
/// ["Node1", "Node2",...."NodeN"]
/// timeout: (Optional) Time to wait respond from nodes (override the default timeout) (in sec).
/// Pass -1 to use default timeout
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
/// Ledger*
#[no_mangle]
pub extern fn indy_submit_action(command_handle: CommandHandle,
                                 pool_handle: PoolHandle,
                                 request_json: *const c_char,
                                 nodes: *const c_char,
                                 timeout: i32,
                                 cb: Option<extern fn(command_handle_: CommandHandle,
                                                      err: ErrorCode,
                                                      request_result_json: *const c_char)>) -> ErrorCode {
    trace!("indy_submit_action: >>> pool_handle: {:?}, request_json: {:?}, nodes: {:?}, timeout: {:?}", pool_handle, request_json, nodes, timeout);
    // Validate/convert the raw C inputs; `nodes` is optional (null allowed).
    check_useful_c_str!(request_json, ErrorCode::CommonInvalidParam3);
    check_useful_opt_c_str!(nodes, ErrorCode::CommonInvalidParam4);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam6);
    // -1 is the documented sentinel for "use the default pool timeout".
    let timeout = if timeout != -1 { Some(timeout) } else { None };
    trace!("indy_submit_action: entities >>> pool_handle: {:?}, request_json: {:?}, nodes: {:?}, timeout: {:?}", pool_handle, request_json, nodes, timeout);
    // Dispatch to the executor; the action result reaches `cb` asynchronously.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(
            LedgerCommand::SubmitAction(
                pool_handle,
                request_json,
                nodes,
                timeout,
                boxed_callback_string!("indy_submit_action", cb, command_handle)
            )));
    let res = prepare_result!(result);
    trace!("indy_submit_action: <<< res: {:?}", res);
    res
}
/// Signs request message.
///
/// Adds submitter information to passed request json, signs it with submitter
/// sign key (see wallet_sign).
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// wallet_handle: wallet handle (created by open_wallet).
/// submitter_did: Id of Identity stored in secured Wallet.
/// request_json: Request data json.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Signed request json.
///
/// #Errors
/// Common*
/// Wallet*
/// Ledger*
/// Crypto*
#[no_mangle]
pub extern fn indy_sign_request(command_handle: CommandHandle,
                                wallet_handle: WalletHandle,
                                submitter_did: *const c_char,
                                request_json: *const c_char,
                                cb: Option<extern fn(command_handle_: CommandHandle,
                                                     err: ErrorCode,
                                                     signed_request_json: *const c_char)>) -> ErrorCode {
    trace!("indy_sign_request: >>> wallet_handle: {:?}, submitter_did: {:?}, request_json: {:?}", wallet_handle, submitter_did, request_json);
    // Validate/convert the raw C inputs; each macro returns its ErrorCode on failure.
    // NOTE(review): ErrorCode indices (2..4) lag actual argument positions (3..5)
    // by one — kept as-is to preserve the FFI error contract; confirm upstream.
    check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_c_str!(request_json, ErrorCode::CommonInvalidParam3);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam4);
    trace!("indy_sign_request: entities >>> wallet_handle: {:?}, submitter_did: {:?}, request_json: {:?}", wallet_handle, submitter_did, request_json);
    // Signing happens asynchronously in the executor; `cb` receives the signed request.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::SignRequest(
            wallet_handle,
            submitter_did,
            request_json,
            boxed_callback_string!("indy_sign_request", cb, command_handle)
        )));
    let res = prepare_result!(result);
    trace!("indy_sign_request: <<< res: {:?}", res);
    res
}
/// Multi signs request message.
///
/// Adds submitter information to passed request json, signs it with submitter
/// sign key (see wallet_sign).
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// wallet_handle: wallet handle (created by open_wallet).
/// submitter_did: Id of Identity stored in secured Wallet.
/// request_json: Request data json.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Signed request json.
///
/// #Errors
/// Common*
/// Wallet*
/// Ledger*
/// Crypto*
#[no_mangle]
pub extern fn indy_multi_sign_request(command_handle: CommandHandle,
                                      wallet_handle: WalletHandle,
                                      submitter_did: *const c_char,
                                      request_json: *const c_char,
                                      cb: Option<extern fn(command_handle_: CommandHandle, err: ErrorCode,
                                                           signed_request_json: *const c_char)>) -> ErrorCode {
    trace!("indy_multi_sign_request: >>> wallet_handle: {:?}, submitter_did: {:?}, request_json: {:?}", wallet_handle, submitter_did, request_json);
    // Validate/convert the raw C inputs; each macro returns its ErrorCode on failure.
    // NOTE(review): as in indy_sign_request, ErrorCode indices lag the actual
    // argument positions by one — kept as-is to preserve the FFI error contract.
    check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_c_str!(request_json, ErrorCode::CommonInvalidParam3);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam4);
    trace!("indy_multi_sign_request: entities >>> wallet_handle: {:?}, submitter_did: {:?}, request_json: {:?}", wallet_handle, submitter_did, request_json);
    // Adds this signer's signature alongside any existing ones; result via `cb`.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::MultiSignRequest(
            wallet_handle,
            submitter_did,
            request_json,
            boxed_callback_string!("indy_multi_sign_request", cb, command_handle)
        )));
    let res = prepare_result!(result);
    trace!("indy_multi_sign_request: <<< res: {:?}", res);
    res
}
/// Builds a request to get a DDO.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
/// target_did: Target DID as base58-encoded string for 16 or 32 bit DID value.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_build_get_ddo_request(command_handle: CommandHandle,
                                         submitter_did: *const c_char,
                                         target_did: *const c_char,
                                         cb: Option<extern fn(command_handle_: CommandHandle,
                                                              err: ErrorCode,
                                                              request_json: *const c_char)>) -> ErrorCode {
    trace!("indy_build_get_ddo_request: >>> submitter_did: {:?}, target_did: {:?}", submitter_did, target_did);
    // `submitter_did` is optional for read requests (null -> default Libindy DID).
    check_useful_validatable_opt_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_validatable_string!(target_did, ErrorCode::CommonInvalidParam3, DidValue);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam4);
    trace!("indy_build_get_ddo_request: entities >>> submitter_did: {:?}, target_did: {:?}", submitter_did, target_did);
    // Builds the request JSON only; nothing is sent to the pool here.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::BuildGetDdoRequest(
            submitter_did,
            target_did,
            boxed_callback_string!("indy_build_get_ddo_request", cb, command_handle)
        )));
    let res = prepare_result!(result);
    trace!("indy_build_get_ddo_request: <<< res: {:?}", res);
    res
}
/// Builds a NYM request. Request to create a new NYM record for a specific user.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: Identifier (DID) of the transaction author as base58-encoded string.
/// Actual request sender may differ if Endorser is used (look at `indy_append_request_endorser`)
/// target_did: Target DID as base58-encoded string for 16 or 32 bit DID value.
/// verkey: Target identity verification key as base58-encoded string.
/// alias: NYM's alias.
/// role: Role of a user NYM record:
/// null (common USER)
/// TRUSTEE
/// STEWARD
/// TRUST_ANCHOR
/// ENDORSER - equal to TRUST_ANCHOR that will be removed soon
/// NETWORK_MONITOR
/// empty string to reset role
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_build_nym_request(command_handle: CommandHandle,
                                     submitter_did: *const c_char,
                                     target_did: *const c_char,
                                     verkey: *const c_char,
                                     alias: *const c_char,
                                     role: *const c_char,
                                     cb: Option<extern fn(command_handle_: CommandHandle,
                                                          err: ErrorCode,
                                                          request_json: *const c_char)>) -> ErrorCode {
    trace!("indy_build_nym_request: >>> submitter_did: {:?}, target_did: {:?}, verkey: {:?}, alias: {:?}, role: {:?}",
           submitter_did, target_did, verkey, alias, role);
    // DIDs are validated; verkey/alias/role are optional plain strings (null allowed).
    check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_validatable_string!(target_did, ErrorCode::CommonInvalidParam3, DidValue);
    check_useful_opt_c_str!(verkey, ErrorCode::CommonInvalidParam4);
    check_useful_opt_c_str!(alias, ErrorCode::CommonInvalidParam5);
    check_useful_opt_c_str!(role, ErrorCode::CommonInvalidParam6);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam7);
    trace!("indy_build_nym_request: entities >>> submitter_did: {:?}, target_did: {:?}, verkey: {:?}, alias: {:?}, role: {:?}",
           submitter_did, target_did, verkey, alias, role);
    // Builds the NYM request JSON; signing/submission are separate calls.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::BuildNymRequest(
            submitter_did,
            target_did,
            verkey,
            alias,
            role,
            boxed_callback_string!("indy_build_nym_request", cb, command_handle)
        )));
    let res = prepare_result!(result);
    trace!("indy_build_nym_request: <<< res: {:?}", res);
    res
}
/// Builds a GET_NYM request. Request to get information about a DID (NYM).
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
/// target_did: Target DID as base58-encoded string for 16 or 32 bit DID value.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_build_get_nym_request(command_handle: CommandHandle,
                                         submitter_did: *const c_char,
                                         target_did: *const c_char,
                                         cb: Option<extern fn(command_handle_: CommandHandle,
                                                              err: ErrorCode,
                                                              request_json: *const c_char)>) -> ErrorCode {
    trace!("indy_build_get_nym_request: >>> submitter_did: {:?}, target_did: {:?}", submitter_did, target_did);
    // `submitter_did` is optional for read requests (null -> default Libindy DID).
    check_useful_validatable_opt_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_validatable_string!(target_did, ErrorCode::CommonInvalidParam3, DidValue);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam4);
    trace!("indy_build_get_nym_request: entities >>> submitter_did: {:?}, target_did: {:?}", submitter_did, target_did);
    // Builds the GET_NYM request JSON; result delivered through `cb`.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::BuildGetNymRequest(
            submitter_did,
            target_did,
            boxed_callback_string!("indy_build_get_nym_request", cb, command_handle)
        )));
    let res = prepare_result!(result);
    trace!("indy_build_get_nym_request: <<< res: {:?}", res);
    res
}
/// Parse a GET_NYM response to get NYM data.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// get_nym_response: response on GET_NYM request.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// NYM data
/// {
/// did: DID as base58-encoded string for 16 or 32 bit DID value.
/// verkey: verification key as base58-encoded string.
/// role: Role associated number
/// null (common USER)
/// 0 - TRUSTEE
/// 2 - STEWARD
/// 101 - TRUST_ANCHOR
/// 101 - ENDORSER - equal to TRUST_ANCHOR that will be removed soon
/// 201 - NETWORK_MONITOR
/// }
///
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_parse_get_nym_response(command_handle: CommandHandle,
                                          get_nym_response: *const c_char,
                                          cb: Option<extern fn(command_handle_: CommandHandle,
                                                               err: ErrorCode,
                                                               nym_json: *const c_char)>) -> ErrorCode {
    trace!("indy_parse_get_nym_response: >>> get_nym_response: {:?}", get_nym_response);
    // Validate/convert the raw C inputs; each macro returns its ErrorCode on failure.
    check_useful_c_str!(get_nym_response, ErrorCode::CommonInvalidParam2);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam3);
    trace!("indy_parse_get_nym_response: entities >>> get_nym_response: {:?}", get_nym_response);
    // Pure parsing of a previously fetched response; no pool interaction.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::ParseGetNymResponse(
            get_nym_response,
            boxed_callback_string!("indy_parse_get_nym_response", cb, command_handle)
        )));
    let res = prepare_result!(result);
    trace!("indy_parse_get_nym_response: <<< res: {:?}", res);
    res
}
/// Builds an ATTRIB request. Request to add attribute to a NYM record.
///
/// Note: one of the fields `hash`, `raw`, `enc` must be specified.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: Identifier (DID) of the transaction author as base58-encoded string.
/// Actual request sender may differ if Endorser is used (look at `indy_append_request_endorser`)
/// target_did: Target DID as base58-encoded string for 16 or 32 bit DID value.
/// hash: (Optional) Hash of attribute data.
/// raw: (Optional) Json, where key is attribute name and value is attribute value.
/// enc: (Optional) Encrypted value attribute data.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_build_attrib_request(command_handle: CommandHandle,
                                        submitter_did: *const c_char,
                                        target_did: *const c_char,
                                        hash: *const c_char,
                                        raw: *const c_char,
                                        enc: *const c_char,
                                        cb: Option<extern fn(command_handle_: CommandHandle,
                                                             err: ErrorCode,
                                                             request_json: *const c_char)>) -> ErrorCode {
    trace!("indy_build_attrib_request: >>> submitter_did: {:?}, target_did: {:?}, hash: {:?}, raw: {:?}, enc: {:?}",
           submitter_did, target_did, hash, raw, enc);
    // hash/raw/enc are each optional here; `raw` must additionally be valid JSON.
    check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_validatable_string!(target_did, ErrorCode::CommonInvalidParam3, DidValue);
    check_useful_opt_c_str!(hash, ErrorCode::CommonInvalidParam4);
    check_useful_opt_json!(raw, ErrorCode::CommonInvalidParam5, serde_json::Value);
    check_useful_opt_c_str!(enc, ErrorCode::CommonInvalidParam6);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam7);
    trace!("indy_build_attrib_request: entities >>> submitter_did: {:?}, target_did: {:?}, hash: {:?}, raw: {:?}, enc: {:?}",
           submitter_did, target_did, hash, raw, enc);
    // At least one of raw/hash/enc must be supplied, per the ATTRIB contract.
    if raw.is_none() && hash.is_none() && enc.is_none() {
        return IndyError::from_msg(IndyErrorKind::InvalidStructure, "Either raw or hash or enc must be specified").into();
    }
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::BuildAttribRequest(
            submitter_did,
            target_did,
            hash,
            raw,
            enc,
            boxed_callback_string!("indy_build_attrib_request", cb, command_handle)
        )));
    let res = prepare_result!(result);
    trace!("indy_build_attrib_request: <<< res: {:?}", res);
    res
}
/// Builds a GET_ATTRIB request. Request to get information about an Attribute for the specified DID.
///
/// Note: one of the fields `hash`, `raw`, `enc` must be specified.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
/// target_did: Target DID as base58-encoded string for 16 or 32 bit DID value.
/// raw: (Optional) Requested attribute name.
/// hash: (Optional) Requested attribute hash.
/// enc: (Optional) Requested attribute encrypted value.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_build_get_attrib_request(command_handle: CommandHandle,
                                            submitter_did: *const c_char,
                                            target_did: *const c_char,
                                            raw: *const c_char,
                                            hash: *const c_char,
                                            enc: *const c_char,
                                            cb: Option<extern fn(command_handle_: CommandHandle,
                                                                 err: ErrorCode,
                                                                 request_json: *const c_char)>) -> ErrorCode {
    trace!("indy_build_get_attrib_request: >>> submitter_did: {:?}, target_did: {:?}, hash: {:?}, raw: {:?}, enc: {:?}",
           submitter_did, target_did, hash, raw, enc);
    // `submitter_did` is optional for read requests; raw/hash/enc are each optional.
    check_useful_validatable_opt_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_validatable_string!(target_did, ErrorCode::CommonInvalidParam3, DidValue);
    check_useful_opt_c_str!(raw, ErrorCode::CommonInvalidParam4);
    check_useful_opt_c_str!(hash, ErrorCode::CommonInvalidParam5);
    check_useful_opt_c_str!(enc, ErrorCode::CommonInvalidParam6);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam7);
    trace!("indy_build_get_attrib_request: entities >>> submitter_did: {:?}, target_did: {:?}, hash: {:?}, raw: {:?}, enc: {:?}",
           submitter_did, target_did, hash, raw, enc);
    // At least one of raw/hash/enc must be supplied to identify the attribute.
    if raw.is_none() && hash.is_none() && enc.is_none() {
        return IndyError::from_msg(IndyErrorKind::InvalidStructure, "Either raw or hash or enc must be specified").into();
    }
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::BuildGetAttribRequest(
            submitter_did,
            target_did,
            raw,
            hash,
            enc,
            boxed_callback_string!("indy_build_get_attrib_request", cb, command_handle)
        )));
    let res = prepare_result!(result);
    trace!("indy_build_get_attrib_request: <<< res: {:?}", res);
    res
}
/// Builds a SCHEMA request. Request to add Credential's schema.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: Identifier (DID) of the transaction author as base58-encoded string.
/// Actual request sender may differ if Endorser is used (look at `indy_append_request_endorser`)
/// data: Credential schema.
/// {
/// id: identifier of schema
/// attrNames: array of attribute name strings (the number of attributes should be less or equal than 125)
/// name: Schema's name string
/// version: Schema's version string,
/// ver: Version of the Schema json
/// }
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_build_schema_request(command_handle: CommandHandle,
                                        submitter_did: *const c_char,
                                        data: *const c_char,
                                        cb: Option<extern fn(command_handle_: CommandHandle,
                                                             err: ErrorCode,
                                                             request_json: *const c_char)>) -> ErrorCode {
    trace!("indy_build_schema_request: >>> submitter_did: {:?}, data: {:?}", submitter_did, data);
    // `data` must deserialize into a valid `Schema`; invalid JSON fails fast here.
    check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_validatable_json!(data, ErrorCode::CommonInvalidParam3, Schema);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam4);
    trace!("indy_build_schema_request: entities >>> submitter_did: {:?}, data: {:?}", submitter_did, data);
    // Builds the SCHEMA request JSON; result delivered through `cb`.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::BuildSchemaRequest(
            submitter_did,
            data,
            boxed_callback_string!("indy_build_schema_request", cb, command_handle)
        )));
    let res = prepare_result!(result);
    trace!("indy_build_schema_request: <<< res: {:?}", res);
    res
}
/// Builds a GET_SCHEMA request. Request to get Credential's Schema.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
/// id: Schema ID in ledger
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_build_get_schema_request(command_handle: CommandHandle,
                                            submitter_did: *const c_char,
                                            id: *const c_char,
                                            cb: Option<extern fn(command_handle_: CommandHandle,
                                                                 err: ErrorCode,
                                                                 request_json: *const c_char)>) -> ErrorCode {
    trace!("indy_build_get_schema_request: >>> submitter_did: {:?}, id: {:?}", submitter_did, id);
    // `submitter_did` is optional for read requests; `id` must be a valid SchemaId.
    check_useful_validatable_opt_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_validatable_string!(id, ErrorCode::CommonInvalidParam3, SchemaId);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam4);
    trace!("indy_build_get_schema_request: entities >>> submitter_did: {:?}, id: {:?}", submitter_did, id);
    // Builds the GET_SCHEMA request JSON; result delivered through `cb`.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::BuildGetSchemaRequest(
            submitter_did,
            id,
            boxed_callback_string!("indy_build_get_schema_request", cb, command_handle)
        )));
    let res = prepare_result!(result);
    trace!("indy_build_get_schema_request: <<< res: {:?}", res);
    res
}
/// Parse a GET_SCHEMA response to get Schema in the format compatible with Anoncreds API.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// get_schema_response: response of GET_SCHEMA request.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Schema Id and Schema json.
/// {
/// id: identifier of schema
/// attrNames: array of attribute name strings
/// name: Schema's name string
/// version: Schema's version string
/// ver: Version of the Schema json
/// }
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_parse_get_schema_response(command_handle: CommandHandle,
                                             get_schema_response: *const c_char,
                                             cb: Option<extern fn(command_handle_: CommandHandle,
                                                                  err: ErrorCode,
                                                                  schema_id: *const c_char,
                                                                  schema_json: *const c_char)>) -> ErrorCode {
    trace!("indy_parse_get_schema_response: >>> get_schema_response: {:?}", get_schema_response);
    // Validate/convert the raw C inputs; each macro returns its ErrorCode on failure.
    check_useful_c_str!(get_schema_response, ErrorCode::CommonInvalidParam2);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam3);
    trace!("indy_parse_get_schema_response: entities >>> get_schema_response: {:?}", get_schema_response);
    // The callback returns two strings, so a custom closure (rather than the
    // single-string `boxed_callback_string!` helper) marshals both to C.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::ParseGetSchemaResponse(
            get_schema_response,
            Box::new(move |result| {
                // On error, substitute empty strings so the pointers stay valid.
                let (err, schema_id, schema_json) = prepare_result_2!(result, String::new(), String::new());
                trace!("indy_parse_get_schema_response: schema_id: {:?}, schema_json: {:?}", schema_id, schema_json);
                let schema_id = ctypes::string_to_cstring(schema_id);
                let schema_json = ctypes::string_to_cstring(schema_json);
                cb(command_handle, err, schema_id.as_ptr(), schema_json.as_ptr())
            })
        )));
    let res = prepare_result!(result);
    trace!("indy_parse_get_schema_response: <<< res: {:?}", res);
    res
}
/// Builds an CRED_DEF request. Request to add a Credential Definition (in particular, public key),
/// that Issuer creates for a particular Credential Schema.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: Identifier (DID) of the transaction author as base58-encoded string.
/// Actual request sender may differ if Endorser is used (look at `indy_append_request_endorser`)
/// data: credential definition json
/// {
/// id: string - identifier of credential definition
/// schemaId: string - identifier of stored in ledger schema
/// type: string - type of the credential definition. CL is the only supported type now.
/// tag: string - allows to distinct between credential definitions for the same issuer and schema
/// value: Dictionary with Credential Definition's data: {
/// primary: primary credential public key,
/// Optional<revocation>: revocation credential public key
/// },
/// ver: Version of the CredDef json
/// }
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_build_cred_def_request(command_handle: CommandHandle,
                                          submitter_did: *const c_char,
                                          data: *const c_char,
                                          cb: Option<extern fn(command_handle_: CommandHandle,
                                                               err: ErrorCode,
                                                               request_result_json: *const c_char)>) -> ErrorCode {
    trace!("indy_build_cred_def_request: >>> submitter_did: {:?}, data: {:?}", submitter_did, data);
    // `data` must deserialize into a valid `CredentialDefinition`; invalid JSON fails fast.
    check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_validatable_json!(data, ErrorCode::CommonInvalidParam3, CredentialDefinition);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam4);
    trace!("indy_build_cred_def_request: entities >>> submitter_did: {:?}, data: {:?}", submitter_did, data);
    // Builds the CRED_DEF request JSON; result delivered through `cb`.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::BuildCredDefRequest(
            submitter_did,
            data,
            boxed_callback_string!("indy_build_cred_def_request", cb, command_handle)
        )));
    let res = prepare_result!(result);
    trace!("indy_build_cred_def_request: <<< res: {:?}", res);
    res
}
/// Builds a GET_CRED_DEF request. Request to get a Credential Definition (in particular, public key),
/// that Issuer creates for a particular Credential Schema.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
/// id: Credential Definition ID in ledger.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_build_get_cred_def_request(command_handle: CommandHandle,
                                              submitter_did: *const c_char,
                                              id: *const c_char,
                                              cb: Option<extern fn(command_handle_: CommandHandle,
                                                                   err: ErrorCode,
                                                                   request_json: *const c_char)>) -> ErrorCode {
    trace!("indy_build_get_cred_def_request: >>> submitter_did: {:?}, id: {:?}", submitter_did, id);
    // `submitter_did` is optional for read requests; `id` must be a valid CredentialDefinitionId.
    check_useful_validatable_opt_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_validatable_string!(id, ErrorCode::CommonInvalidParam3, CredentialDefinitionId);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam4);
    trace!("indy_build_get_cred_def_request: entities >>> submitter_did: {:?}, id: {:?}", submitter_did, id);
    // Builds the GET_CRED_DEF request JSON; result delivered through `cb`.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::BuildGetCredDefRequest(
            submitter_did,
            id,
            boxed_callback_string!("indy_build_get_cred_def_request", cb, command_handle)
        )));
    let res = prepare_result!(result);
    trace!("indy_build_get_cred_def_request: <<< res: {:?}", res);
    res
}
/// Parse a GET_CRED_DEF response to get Credential Definition in the format compatible with Anoncreds API.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// get_cred_def_response: response of GET_CRED_DEF request.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Credential Definition Id and Credential Definition json.
/// {
/// id: string - identifier of credential definition
/// schemaId: string - identifier of stored in ledger schema
/// type: string - type of the credential definition. CL is the only supported type now.
/// tag: string - allows to distinct between credential definitions for the same issuer and schema
/// value: Dictionary with Credential Definition's data: {
/// primary: primary credential public key,
/// Optional<revocation>: revocation credential public key
/// },
/// ver: Version of the Credential Definition json
/// }
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_parse_get_cred_def_response(command_handle: CommandHandle,
                                               get_cred_def_response: *const c_char,
                                               cb: Option<extern fn(command_handle_: CommandHandle,
                                                                    err: ErrorCode,
                                                                    cred_def_id: *const c_char,
                                                                    cred_def_json: *const c_char)>) -> ErrorCode {
    trace!("indy_parse_get_cred_def_response: >>> get_cred_def_response: {:?}", get_cred_def_response);
    // Validate/convert the raw C inputs; each macro returns its ErrorCode on failure.
    check_useful_c_str!(get_cred_def_response, ErrorCode::CommonInvalidParam2);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam3);
    trace!("indy_parse_get_cred_def_response: entities >>> get_cred_def_response: {:?}", get_cred_def_response);
    // Two-string callback: a custom closure marshals both id and json to C.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::ParseGetCredDefResponse(
            get_cred_def_response,
            Box::new(move |result| {
                // On error, substitute empty strings so the pointers stay valid.
                let (err, cred_def_id, cred_def_json) = prepare_result_2!(result, String::new(), String::new());
                trace!("indy_parse_get_cred_def_response: cred_def_id: {:?}, cred_def_json: {:?}", cred_def_id, cred_def_json);
                let cred_def_id = ctypes::string_to_cstring(cred_def_id);
                let cred_def_json = ctypes::string_to_cstring(cred_def_json);
                cb(command_handle, err, cred_def_id.as_ptr(), cred_def_json.as_ptr())
            })
        )));
    let res = prepare_result!(result);
    trace!("indy_parse_get_cred_def_response: <<< res: {:?}", res);
    res
}
/// Builds a NODE request. Request to add a new node to the pool, or updates existing in the pool.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: Identifier (DID) of the transaction author as base58-encoded string.
/// Actual request sender may differ if Endorser is used (look at `indy_append_request_endorser`)
/// target_did: Target Node's DID. It differs from submitter_did field.
/// data: Data associated with the Node: {
/// alias: string - Node's alias
/// blskey: string - (Optional) BLS multi-signature key as base58-encoded string.
/// blskey_pop: string - (Optional) BLS key proof of possession as base58-encoded string.
/// client_ip: string - (Optional) Node's client listener IP address.
/// client_port: string - (Optional) Node's client listener port.
/// node_ip: string - (Optional) The IP address other Nodes use to communicate with this Node.
/// node_port: string - (Optional) The port other Nodes use to communicate with this Node.
/// services: array<string> - (Optional) The service of the Node. VALIDATOR is the only supported one now.
/// }
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
// FFI: builds a NODE request (add/update a pool node); see the doc comment above
// for the parameter contract. The built request json is returned through `cb`.
pub extern fn indy_build_node_request(command_handle: CommandHandle,
submitter_did: *const c_char,
target_did: *const c_char,
data: *const c_char,
cb: Option<extern fn(command_handle_: CommandHandle,
err: ErrorCode,
request_json: *const c_char)>) -> ErrorCode {
trace!("indy_build_node_request: >>> submitter_did: {:?}, target_did: {:?}, data: {:?}", submitter_did, target_did, data);
// Validate/convert raw C arguments; each macro early-returns its error code on failure.
check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
check_useful_validatable_string!(target_did, ErrorCode::CommonInvalidParam3, DidValue);
// `data` must deserialize into NodeOperationData, otherwise CommonInvalidParam4.
check_useful_json!(data, ErrorCode::CommonInvalidParam4, NodeOperationData);
check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam5);
trace!("indy_build_node_request: entities >>> submitter_did: {:?}, target_did: {:?}, data: {:?}", submitter_did, target_did, data);
// Dispatch asynchronously; boxed_callback_string! wraps `cb` to hand the single
// string result back across the FFI boundary.
let result = CommandExecutor::instance()
.send(Command::Ledger(LedgerCommand::BuildNodeRequest(
submitter_did,
target_did,
data,
boxed_callback_string!("indy_build_node_request", cb, command_handle)
)));
let res = prepare_result!(result);
trace!("indy_build_node_request: <<< res: {:?}", res);
res
}
/// Builds a GET_VALIDATOR_INFO request.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: DID of the read request sender.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
// FFI: builds a GET_VALIDATOR_INFO request; request json is returned via `cb`.
// NOTE(review): unlike sibling builders this fn has no entry/exit trace! calls,
// and `cb` (the 3rd parameter) is rejected with CommonInvalidParam4 rather than
// CommonInvalidParam3 — confirm whether these are intentional before changing,
// since the error code is observable by FFI callers.
pub extern fn indy_build_get_validator_info_request(command_handle: CommandHandle,
submitter_did: *const c_char,
cb: Option<extern fn(command_handle_: CommandHandle, err: ErrorCode,
request_json: *const c_char)>) -> ErrorCode {
check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam4);
let result = CommandExecutor::instance()
.send(Command::Ledger(LedgerCommand::BuildGetValidatorInfoRequest(
submitter_did,
boxed_callback_string!("indy_build_get_validator_info_request", cb, command_handle)
)));
prepare_result!(result)
}
/// Builds a GET_TXN request. Request to get any transaction by its seq_no.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
/// ledger_type: (Optional) type of the ledger the requested transaction belongs to:
/// DOMAIN - used default,
/// POOL,
/// CONFIG
/// any number
/// seq_no: requested transaction sequence number as it's stored on Ledger.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
// FFI: builds a GET_TXN request for a transaction by ledger type and seq_no;
// request json is returned via `cb`. submitter_did and ledger_type are optional
// (null pointers allowed — see the *_opt_* validation macros below).
pub extern fn indy_build_get_txn_request(command_handle: CommandHandle,
submitter_did: *const c_char,
ledger_type: *const c_char,
seq_no: i32,
cb: Option<extern fn(command_handle_: CommandHandle,
err: ErrorCode,
request_json: *const c_char)>) -> ErrorCode {
trace!("indy_build_get_txn_request: >>> submitter_did: {:?}, ledger_type: {:?}, seq_no: {:?}", submitter_did, ledger_type, seq_no);
check_useful_validatable_opt_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
// NOTE(review): ledger_type is the 3rd parameter but failures report
// CommonInvalidParam4 — verify whether this numbering is intentional.
check_useful_opt_c_str!(ledger_type, ErrorCode::CommonInvalidParam4);
check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam5);
trace!("indy_build_get_txn_request: entities >>> submitter_did: {:?}, ledger_type: {:?}, seq_no: {:?}", submitter_did, ledger_type, seq_no);
// Dispatch asynchronously to the ledger command handler.
let result = CommandExecutor::instance()
.send(Command::Ledger(LedgerCommand::BuildGetTxnRequest(
submitter_did,
ledger_type,
seq_no,
boxed_callback_string!("indy_build_get_txn_request", cb, command_handle)
)));
let res = prepare_result!(result);
trace!("indy_build_get_txn_request: <<< res: {:?}", res);
res
}
/// Builds a POOL_CONFIG request. Request to change Pool's configuration.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: Identifier (DID) of the transaction author as base58-encoded string.
/// Actual request sender may differ if Endorser is used (look at `indy_append_request_endorser`)
/// writes: Whether any write requests can be processed by the pool
/// (if false, then pool goes to read-only state). True by default.
/// force: Whether we should apply transaction (for example, move pool to read-only state)
/// without waiting for consensus of this transaction.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
// FFI: builds a POOL_CONFIG request (toggle pool writes / force flag); request
// json is returned via `cb`. `writes` and `force` are plain bools and need no
// validation; only the DID and callback are checked.
pub extern fn indy_build_pool_config_request(command_handle: CommandHandle,
submitter_did: *const c_char,
writes: bool,
force: bool,
cb: Option<extern fn(command_handle_: CommandHandle,
err: ErrorCode,
request_json: *const c_char)>) -> ErrorCode {
trace!("indy_build_pool_config_request: >>> submitter_did: {:?}, writes: {:?}, force: {:?}", submitter_did, writes, force);
check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
// cb is the 5th parameter, hence CommonInvalidParam5.
check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam5);
trace!("indy_build_pool_config_request: entities >>> submitter_did: {:?}, writes: {:?}, force: {:?}", submitter_did, writes, force);
let result = CommandExecutor::instance()
.send(Command::Ledger(LedgerCommand::BuildPoolConfigRequest(
submitter_did,
writes,
force,
boxed_callback_string!("indy_build_pool_config_request", cb, command_handle)
)));
let res = prepare_result!(result);
trace!("indy_build_pool_config_request: <<< res: {:?}", res);
res
}
/// Builds a POOL_RESTART request.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: Identifier (DID) of the transaction author as base58-encoded string.
/// action: Action that pool has to do after received transaction. Either `start` or `cancel`.
/// datetime: <Optional> Restart time in datetime format. Skip to restart as early as possible.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
// FFI: builds a POOL_RESTART request; `action` must be "start" or "cancel",
// `datetime` is optional. Request json is returned via `cb`.
pub extern fn indy_build_pool_restart_request(command_handle: CommandHandle,
submitter_did: *const c_char,
action: *const c_char,
datetime: *const c_char,
cb: Option<extern fn(command_handle_: CommandHandle,
err: ErrorCode,
request_json: *const c_char)>) -> ErrorCode {
trace!("indy_build_pool_restart_request: >>> submitter_did: {:?}, action: {:?}, datetime: {:?}", submitter_did, action, datetime);
check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
// After this macro, `action` is rebound as an owned string, enabling the
// equality checks against "start"/"cancel" below.
check_useful_c_str!(action, ErrorCode::CommonInvalidParam3);
check_useful_opt_c_str!(datetime, ErrorCode::CommonInvalidParam4);
check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam5);
trace!("indy_build_pool_restart_request: entities >>> submitter_did: {:?}, action: {:?}, datetime: {:?}", submitter_did, action, datetime);
// Domain validation beyond the macros: only two actions are legal.
// The IndyError is converted into the ErrorCode returned to the FFI caller.
if action != "start" && action != "cancel" {
return IndyError::from_msg(IndyErrorKind::InvalidStructure, format!("Unsupported action: {}. Must be either `start` or `cancel`", action)).into();
}
let result = CommandExecutor::instance()
.send(Command::Ledger(
LedgerCommand::BuildPoolRestartRequest(
submitter_did,
action,
datetime,
boxed_callback_string!("indy_build_pool_restart_request", cb, command_handle)
)));
let res = prepare_result!(result);
trace!("indy_build_pool_restart_request: <<< res: {:?}", res);
res
}
/// Builds a POOL_UPGRADE request. Request to upgrade the Pool (sent by Trustee).
/// It upgrades the specified Nodes (either all nodes in the Pool, or some specific ones).
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: Identifier (DID) of the transaction author as base58-encoded string.
/// Actual request sender may differ if Endorser is used (look at `indy_append_request_endorser`)
/// name: Human-readable name for the upgrade.
/// version: The version of indy-node package we perform upgrade to.
/// Must be greater than existing one (or equal if reinstall flag is True).
/// action: Either start or cancel.
/// sha256: sha256 hash of the package.
/// timeout: (Optional) Limits upgrade time on each Node.
/// schedule: (Optional) Schedule of when to perform upgrade on each node. Map Node DIDs to upgrade time.
/// justification: (Optional) justification string for this particular Upgrade.
/// reinstall: Whether it's allowed to re-install the same version. False by default.
/// force: Whether we should apply transaction (schedule Upgrade) without waiting
/// for consensus of this transaction.
/// package: (Optional) Package to be upgraded.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
// FFI: builds a POOL_UPGRADE request (Trustee-initiated node upgrade).
// See the doc comment above for the full parameter contract; request json is
// returned via `cb`.
pub extern fn indy_build_pool_upgrade_request(command_handle: CommandHandle,
submitter_did: *const c_char,
name: *const c_char,
version: *const c_char,
action: *const c_char,
sha256: *const c_char,
timeout: i32,
schedule: *const c_char,
justification: *const c_char,
reinstall: bool,
force: bool,
package: *const c_char,
cb: Option<extern fn(command_handle_: CommandHandle,
err: ErrorCode,
request_json: *const c_char)>) -> ErrorCode {
trace!("indy_build_pool_upgrade_request: >>> submitter_did: {:?}, name: {:?}, version: {:?}, action: {:?}, sha256: {:?}, timeout: {:?}, \
schedule: {:?}, justification: {:?}, reinstall: {:?}, force: {:?}, package: {:?}",
submitter_did, name, version, action, sha256, timeout, schedule, justification, reinstall, force, package);
// Mandatory string/JSON parameters; error codes track parameter positions
// (timeout, reinstall and force are plain values and are not macro-checked).
check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
check_useful_c_str!(name, ErrorCode::CommonInvalidParam3);
check_useful_c_str!(version, ErrorCode::CommonInvalidParam4);
check_useful_c_str!(action, ErrorCode::CommonInvalidParam5);
check_useful_c_str!(sha256, ErrorCode::CommonInvalidParam6);
check_useful_opt_json!(schedule, ErrorCode::CommonInvalidParam8, Schedule);
check_useful_opt_c_str!(justification, ErrorCode::CommonInvalidParam9);
check_useful_opt_c_str!(package, ErrorCode::CommonInvalidParam12);
check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam13);
// -1 is the FFI sentinel for "no timeout"; anything else becomes Some(u32).
let timeout = if timeout != -1 { Some(timeout as u32) } else { None };
trace!("indy_build_pool_upgrade_request: entities >>> submitter_did: {:?}, name: {:?}, version: {:?}, action: {:?}, sha256: {:?}, timeout: {:?}, \
schedule: {:?}, justification: {:?}, reinstall: {:?}, force: {:?}, package: {:?}",
submitter_did, name, version, action, sha256, timeout, schedule, justification, reinstall, force, package);
// Cross-parameter validation: only start/cancel are legal, and a `start`
// upgrade must carry a schedule.
if action != "start" && action != "cancel" {
return IndyError::from_msg(IndyErrorKind::InvalidStructure, format!("Invalid action: {}", action)).into();
}
if action == "start" && schedule.is_none() {
return IndyError::from_msg(IndyErrorKind::InvalidStructure, format!("Schedule is required for `{}` action", action)).into();
}
let result = CommandExecutor::instance()
.send(Command::Ledger(
LedgerCommand::BuildPoolUpgradeRequest(
submitter_did,
name,
version,
action,
sha256,
timeout,
schedule,
justification,
reinstall,
force,
package,
boxed_callback_string!("indy_build_pool_upgrade_request", cb, command_handle)
)));
let res = prepare_result!(result);
trace!("indy_build_pool_upgrade_request: <<< res: {:?}", res);
res
}
/// Builds a REVOC_REG_DEF request. Request to add the definition of revocation registry
/// to an existing credential definition.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: Identifier (DID) of the transaction author as base58-encoded string.
/// Actual request sender may differ if Endorser is used (look at `indy_append_request_endorser`)
/// data: Revocation Registry data:
/// {
/// "id": string - ID of the Revocation Registry,
/// "revocDefType": string - Revocation Registry type (only CL_ACCUM is supported for now),
/// "tag": string - Unique descriptive ID of the Registry,
/// "credDefId": string - ID of the corresponding CredentialDefinition,
/// "value": Registry-specific data {
/// "issuanceType": string - Type of Issuance(ISSUANCE_BY_DEFAULT or ISSUANCE_ON_DEMAND),
/// "maxCredNum": number - Maximum number of credentials the Registry can serve.
/// "tailsHash": string - Hash of tails.
/// "tailsLocation": string - Location of tails file.
/// "publicKeys": <public_keys> - Registry's public key.
/// },
/// "ver": string - version of revocation registry definition json.
/// }
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
// FFI: builds a REVOC_REG_DEF request from a RevocationRegistryDefinition json;
// request json is returned via `cb`.
pub extern fn indy_build_revoc_reg_def_request(command_handle: CommandHandle,
submitter_did: *const c_char,
data: *const c_char,
cb: Option<extern fn(command_handle_: CommandHandle,
err: ErrorCode,
rev_reg_def_req: *const c_char)>) -> ErrorCode {
trace!("indy_build_revoc_reg_def_request: >>> submitter_did: {:?}, data: {:?}", submitter_did, data);
check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
// `data` must deserialize AND validate as a RevocationRegistryDefinition.
check_useful_validatable_json!(data, ErrorCode::CommonInvalidParam3, RevocationRegistryDefinition);
check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam4);
trace!("indy_build_revoc_reg_def_request: entities >>> submitter_did: {:?}, data: {:?}", submitter_did, data);
let result = CommandExecutor::instance()
.send(Command::Ledger(LedgerCommand::BuildRevocRegDefRequest(
submitter_did,
data,
boxed_callback_string!("indy_build_revoc_reg_def_request", cb, command_handle)
)));
let res = prepare_result!(result);
trace!("indy_build_revoc_reg_def_request: <<< res: {:?}", res);
res
}
/// Builds a GET_REVOC_REG_DEF request. Request to get a revocation registry definition,
/// that Issuer creates for a particular Credential Definition.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
/// id: ID of Revocation Registry Definition in ledger.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
// FFI: builds a GET_REVOC_REG_DEF request for a revocation registry definition
// by id; request json is returned via `cb`. submitter_did is optional.
pub extern fn indy_build_get_revoc_reg_def_request(command_handle: CommandHandle,
submitter_did: *const c_char,
id: *const c_char,
cb: Option<extern fn(command_handle_: CommandHandle,
err: ErrorCode,
request_json: *const c_char)>) -> ErrorCode {
trace!("indy_build_get_revoc_reg_def_request: >>> submitter_did: {:?}, id: {:?}", submitter_did, id);
check_useful_validatable_opt_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
// The id must validate as a RevocationRegistryId.
check_useful_validatable_string!(id, ErrorCode::CommonInvalidParam3, RevocationRegistryId);
check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam4);
trace!("indy_build_get_revoc_reg_def_request: entities>>> submitter_did: {:?}, id: {:?}", submitter_did, id);
let result = CommandExecutor::instance()
.send(Command::Ledger(LedgerCommand::BuildGetRevocRegDefRequest(
submitter_did,
id,
boxed_callback_string!("indy_build_get_revoc_reg_def_request", cb, command_handle)
)));
let res = prepare_result!(result);
trace!("indy_build_get_revoc_reg_def_request: <<< res: {:?}", res);
res
}
/// Parse a GET_REVOC_REG_DEF response to get Revocation Registry Definition in the format
/// compatible with Anoncreds API.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// get_revoc_reg_def_response: response of GET_REVOC_REG_DEF request.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Revocation Registry Definition Id and Revocation Registry Definition json.
/// {
/// "id": string - ID of the Revocation Registry,
/// "revocDefType": string - Revocation Registry type (only CL_ACCUM is supported for now),
/// "tag": string - Unique descriptive ID of the Registry,
/// "credDefId": string - ID of the corresponding CredentialDefinition,
/// "value": Registry-specific data {
/// "issuanceType": string - Type of Issuance(ISSUANCE_BY_DEFAULT or ISSUANCE_ON_DEMAND),
/// "maxCredNum": number - Maximum number of credentials the Registry can serve.
/// "tailsHash": string - Hash of tails.
/// "tailsLocation": string - Location of tails file.
/// "publicKeys": <public_keys> - Registry's public key.
/// },
/// "ver": string - version of revocation registry definition json.
/// }
///
/// #Errors
/// Common*
#[no_mangle]
// FFI: parses a GET_REVOC_REG_DEF response into (revoc_reg_def_id,
// revoc_reg_def_json); both strings are delivered asynchronously through `cb`.
pub extern fn indy_parse_get_revoc_reg_def_response(command_handle: CommandHandle,
get_revoc_reg_def_response: *const c_char,
cb: Option<extern fn(command_handle_: CommandHandle,
err: ErrorCode,
revoc_reg_def_id: *const c_char,
revoc_reg_def_json: *const c_char)>) -> ErrorCode {
trace!("indy_parse_get_revoc_reg_def_response: >>> get_revoc_reg_def_response: {:?}", get_revoc_reg_def_response);
check_useful_c_str!(get_revoc_reg_def_response, ErrorCode::CommonInvalidParam2);
check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam3);
trace!("indy_parse_get_revoc_reg_def_response: entities >>> get_revoc_reg_def_response: {:?}", get_revoc_reg_def_response);
let result = CommandExecutor::instance()
.send(Command::Ledger(LedgerCommand::ParseGetRevocRegDefResponse(
get_revoc_reg_def_response,
Box::new(move |result| {
// On error, prepare_result_2! substitutes empty strings for the payloads.
let (err, revoc_reg_def_id, revoc_reg_def_json) = prepare_result_2!(result, String::new(), String::new());
trace!("indy_parse_get_revoc_reg_def_response: revoc_reg_def_id: {:?}, revoc_reg_def_json: {:?}", revoc_reg_def_id, revoc_reg_def_json);
// Keep the CStrings alive in locals while their pointers are used by cb.
let revoc_reg_def_id = ctypes::string_to_cstring(revoc_reg_def_id);
let revoc_reg_def_json = ctypes::string_to_cstring(revoc_reg_def_json);
cb(command_handle, err, revoc_reg_def_id.as_ptr(), revoc_reg_def_json.as_ptr())
})
)));
let res = prepare_result!(result);
trace!("indy_parse_get_revoc_reg_def_response: <<< res: {:?}", res);
res
}
/// Builds a REVOC_REG_ENTRY request. Request to add the RevocReg entry containing
/// the new accumulator value and issued/revoked indices.
/// This is just a delta of indices, not the whole list.
/// So, it can be sent each time a new credential is issued/revoked.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: Identifier (DID) of the transaction author as base58-encoded string.
/// Actual request sender may differ if Endorser is used (look at `indy_append_request_endorser`)
/// revoc_reg_def_id: ID of the corresponding RevocRegDef.
/// rev_def_type: Revocation Registry type (only CL_ACCUM is supported for now).
/// value: Registry-specific data: {
/// value: {
/// prevAccum: string - previous accumulator value.
/// accum: string - current accumulator value.
/// issued: array<number> - an array of issued indices.
/// revoked: array<number> an array of revoked indices.
/// },
/// ver: string - version revocation registry entry json
/// }
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
// FFI: builds a REVOC_REG_ENTRY request carrying a RevocationRegistryDelta
// (new accumulator + issued/revoked index deltas); request json returned via `cb`.
pub extern fn indy_build_revoc_reg_entry_request(command_handle: CommandHandle,
submitter_did: *const c_char,
revoc_reg_def_id: *const c_char,
rev_def_type: *const c_char,
value: *const c_char,
cb: Option<extern fn(command_handle_: CommandHandle,
err: ErrorCode,
request_json: *const c_char)>) -> ErrorCode {
trace!("indy_build_revoc_reg_entry_request: >>> submitter_did: {:?}, revoc_reg_def_id: {:?}, rev_def_type: {:?}, value: {:?}",
submitter_did, revoc_reg_def_id, rev_def_type, value);
check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
check_useful_validatable_string!(revoc_reg_def_id, ErrorCode::CommonInvalidParam3, RevocationRegistryId);
check_useful_c_str!(rev_def_type, ErrorCode::CommonInvalidParam4);
// `value` must deserialize into a RevocationRegistryDelta.
check_useful_json!(value, ErrorCode::CommonInvalidParam5, RevocationRegistryDelta);
check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam6);
trace!("indy_build_revoc_reg_entry_request: entities >>> submitter_did: {:?}, revoc_reg_def_id: {:?}, rev_def_type: {:?}, value: {:?}",
submitter_did, revoc_reg_def_id, rev_def_type, value);
let result = CommandExecutor::instance()
.send(Command::Ledger(LedgerCommand::BuildRevocRegEntryRequest(
submitter_did,
revoc_reg_def_id,
rev_def_type,
value,
boxed_callback_string!("indy_build_revoc_reg_entry_request", cb, command_handle)
)));
let res = prepare_result!(result);
trace!("indy_build_revoc_reg_entry_request: <<< res: {:?}", res);
res
}
/// Builds a GET_REVOC_REG request. Request to get the accumulated state of the Revocation Registry
/// by ID. The state is defined by the given timestamp.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
/// revoc_reg_def_id: ID of the corresponding Revocation Registry Definition in ledger.
/// timestamp: Requested time represented as a total number of seconds from Unix Epoch
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
// FFI: builds a GET_REVOC_REG request for the registry state at `timestamp`;
// request json is returned via `cb`. submitter_did is optional.
pub extern fn indy_build_get_revoc_reg_request(command_handle: CommandHandle,
submitter_did: *const c_char,
revoc_reg_def_id: *const c_char,
timestamp: i64,
cb: Option<extern fn(command_handle_: CommandHandle,
err: ErrorCode,
request_json: *const c_char)>) -> ErrorCode {
trace!("indy_build_get_revoc_reg_request: >>> submitter_did: {:?}, revoc_reg_def_id: {:?}, timestamp: {:?}", submitter_did, revoc_reg_def_id, timestamp);
check_useful_validatable_opt_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
check_useful_validatable_string!(revoc_reg_def_id, ErrorCode::CommonInvalidParam3, RevocationRegistryId);
// cb is the 5th parameter; `timestamp` is a plain i64 and is not macro-checked.
check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam5);
trace!("indy_build_get_revoc_reg_request: entities >>> submitter_did: {:?}, revoc_reg_def_id: {:?}, timestamp: {:?}", submitter_did, revoc_reg_def_id, timestamp);
let result = CommandExecutor::instance()
.send(Command::Ledger(LedgerCommand::BuildGetRevocRegRequest(
submitter_did,
revoc_reg_def_id,
timestamp,
boxed_callback_string!("indy_build_get_revoc_reg_request", cb, command_handle)
)));
let res = prepare_result!(result);
trace!("indy_build_get_revoc_reg_request: <<< res: {:?}", res);
res
}
/// Parse a GET_REVOC_REG response to get Revocation Registry in the format compatible with Anoncreds API.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// get_revoc_reg_response: response of GET_REVOC_REG request.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Revocation Registry Definition Id, Revocation Registry json and Timestamp.
/// {
/// "value": Registry-specific data {
/// "accum": string - current accumulator value.
/// },
/// "ver": string - version revocation registry json
/// }
///
/// #Errors
/// Common*
#[no_mangle]
// FFI: parses a GET_REVOC_REG response into (revoc_reg_def_id, revoc_reg_json,
// timestamp), delivered asynchronously through `cb`.
pub extern fn indy_parse_get_revoc_reg_response(command_handle: CommandHandle,
get_revoc_reg_response: *const c_char,
cb: Option<extern fn(command_handle_: CommandHandle,
err: ErrorCode,
revoc_reg_def_id: *const c_char,
revoc_reg_json: *const c_char,
timestamp: u64)>) -> ErrorCode {
trace!("indy_parse_get_revoc_reg_response: >>> get_revoc_reg_response: {:?}", get_revoc_reg_response);
check_useful_c_str!(get_revoc_reg_response, ErrorCode::CommonInvalidParam2);
check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam3);
trace!("indy_parse_get_revoc_reg_response: entities >>> get_revoc_reg_response: {:?}", get_revoc_reg_response);
let result = CommandExecutor::instance()
.send(Command::Ledger(LedgerCommand::ParseGetRevocRegResponse(
get_revoc_reg_response,
Box::new(move |result| {
// On error, defaults are empty strings and timestamp 0.
let (err, revoc_reg_def_id, revoc_reg_json, timestamp) = prepare_result_3!(result, String::new(), String::new(), 0);
trace!("indy_parse_get_revoc_reg_response: revoc_reg_def_id: {:?}, revoc_reg_json: {:?}, timestamp: {:?}",
revoc_reg_def_id, revoc_reg_json, timestamp);
// Keep CStrings alive in locals for the duration of the cb call.
let revoc_reg_def_id = ctypes::string_to_cstring(revoc_reg_def_id);
let revoc_reg_json = ctypes::string_to_cstring(revoc_reg_json);
cb(command_handle, err, revoc_reg_def_id.as_ptr(), revoc_reg_json.as_ptr(), timestamp)
})
)));
let res = prepare_result!(result);
trace!("indy_parse_get_revoc_reg_response: <<< res: {:?}", res);
res
}
/// Builds a GET_REVOC_REG_DELTA request. Request to get the delta of the accumulated state of the Revocation Registry.
/// The Delta is defined by from and to timestamp fields.
/// If from is not specified, then the whole state till to will be returned.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
/// revoc_reg_def_id: ID of the corresponding Revocation Registry Definition in ledger.
/// from: Requested time represented as a total number of seconds from Unix Epoch
/// to: Requested time represented as a total number of seconds from Unix Epoch
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
// FFI: builds a GET_REVOC_REG_DELTA request for the registry delta between
// `from` and `to`; request json is returned via `cb`. submitter_did is optional.
//
// Fix: the trace!/boxed_callback_string! identifiers previously said
// "indy_build_get_revoc_reg_request" (copy-paste from the non-delta builder),
// making this function's log lines indistinguishable from that sibling's.
pub extern fn indy_build_get_revoc_reg_delta_request(command_handle: CommandHandle,
submitter_did: *const c_char,
revoc_reg_def_id: *const c_char,
from: i64,
to: i64,
cb: Option<extern fn(command_handle_: CommandHandle,
err: ErrorCode,
request_json: *const c_char)>) -> ErrorCode {
trace!("indy_build_get_revoc_reg_delta_request: >>> submitter_did: {:?}, revoc_reg_def_id: {:?}, from: {:?}, to: {:?}",
submitter_did, revoc_reg_def_id, from, to);
check_useful_validatable_opt_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
check_useful_validatable_string!(revoc_reg_def_id, ErrorCode::CommonInvalidParam3, RevocationRegistryId);
// NOTE(review): cb is the 6th parameter but failures report CommonInvalidParam5
// (matching indy_build_get_revoc_reg_request) — left unchanged because the
// error code is observable by FFI callers; confirm before renumbering.
check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam5);
// -1 is the FFI sentinel for "no lower bound": return the whole state up to `to`.
let from = if from != -1 { Some(from) } else { None };
trace!("indy_build_get_revoc_reg_delta_request: entities >>> submitter_did: {:?}, revoc_reg_def_id: {:?}, from: {:?}, to: {:?}",
submitter_did, revoc_reg_def_id, from, to);
let result = CommandExecutor::instance()
.send(Command::Ledger(LedgerCommand::BuildGetRevocRegDeltaRequest(
submitter_did,
revoc_reg_def_id,
from,
to,
boxed_callback_string!("indy_build_get_revoc_reg_delta_request", cb, command_handle)
)));
let res = prepare_result!(result);
trace!("indy_build_get_revoc_reg_delta_request: <<< res: {:?}", res);
res
}
/// Parse a GET_REVOC_REG_DELTA response to get Revocation Registry Delta in the format compatible with Anoncreds API.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// get_revoc_reg_response: response of GET_REVOC_REG_DELTA request.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Revocation Registry Definition Id, Revocation Registry Delta json and Timestamp.
/// {
/// "value": Registry-specific data {
/// prevAccum: string - previous accumulator value.
/// accum: string - current accumulator value.
/// issued: array<number> - an array of issued indices.
/// revoked: array<number> an array of revoked indices.
/// },
/// "ver": string - version revocation registry delta json
/// }
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_parse_get_revoc_reg_delta_response(command_handle: CommandHandle,
get_revoc_reg_delta_response: *const c_char,
cb: Option<extern fn(command_handle_: CommandHandle,
err: ErrorCode,
revoc_reg_def_id: *const c_char,
revoc_reg_delta_json: *const c_char,
timestamp: u64)>) -> ErrorCode {
trace!("indy_parse_get_revoc_reg_delta_response: >>> get_revoc_reg_delta_response: {:?}", get_revoc_reg_delta_response);
check_useful_c_str!(get_revoc_reg_delta_response, ErrorCode::CommonInvalidParam2);
check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam3);
trace!("indy_parse_get_revoc_reg_delta_response: entities >>> get_revoc_reg_delta_response: {:?}", get_revoc_reg_delta_response);
let result = CommandExecutor::instance()
.send(Command::Ledger(LedgerCommand::ParseGetRevocRegDeltaResponse(
get_revoc_reg_delta_response,
Box::new(move |result| {
let (err, revoc_reg_def_id, revoc_reg_delta_json, timestamp) = prepare_result_3!(result, String::new(), String::new(), 0);
trace!("indy_parse_get_revoc_reg_delta_response: revoc_reg_def_id: {:?}, revoc_reg_delta_json: {:?}, timestamp: {:?}",
revoc_reg_def_id, revoc_reg_delta_json, timestamp);
let revoc_reg_def_id = ctypes::string_to_cstring(revoc_reg_def_id);
let revoc_reg_delta_json = ctypes::string_to_cstring(revoc_reg_delta_json);
cb(command_handle, err, revoc_reg_def_id.as_ptr(), revoc_reg_delta_json.as_ptr(), timestamp)
})
)));
let res = prepare_result!(result);
trace!("indy_parse_get_revoc_reg_delta_response: <<< res: {:?}", res);
res
}
/// Callback type for parsing Reply from Node to specific StateProof format
///
/// # params
/// reply_from_node: string representation of node's reply ("as is")
/// parsed_sp: out param to return serialized as string JSON with array of ParsedSP
///
/// # return
/// result ErrorCode
///
/// Note: this callback allocates memory for the result string; `CustomFree` should be called to deallocate it
pub type CustomTransactionParser = extern fn(reply_from_node: *const c_char, parsed_sp: *mut *const c_char) -> ErrorCode;
/// Callback type to deallocate result buffer `parsed_sp` from `CustomTransactionParser`
pub type CustomFree = extern fn(data: *const c_char) -> ErrorCode;
/// Register callbacks (see type description for `CustomTransactionParser` and `CustomFree`)
///
/// # params
/// command_handle: command handle to map callback to caller context.
/// txn_type: type of transaction to apply `parser` callback.
/// parser: required callback to parse reply for state proof.
/// free: required callback to deallocate memory.
/// cb: Callback that takes command result as parameter.
///
/// # returns
/// Status of callbacks registration.
///
/// # errors
/// Common*
#[no_mangle]
pub extern fn indy_register_transaction_parser_for_sp(command_handle: CommandHandle,
txn_type: *const c_char,
parser: Option<CustomTransactionParser>,
free: Option<CustomFree>,
cb: Option<extern fn(command_handle_: CommandHandle, err: ErrorCode)>) -> ErrorCode {
trace!("indy_register_transaction_parser_for_sp: >>> txn_type {:?}, parser {:?}, free {:?}",
txn_type, parser, free);
// All three callbacks are mandatory; each Option is unwrapped (or the fn
// returns the given error code) by check_useful_c_callback!.
check_useful_c_str!(txn_type, ErrorCode::CommonInvalidParam2);
check_useful_c_callback!(parser, ErrorCode::CommonInvalidParam3);
check_useful_c_callback!(free, ErrorCode::CommonInvalidParam4);
check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam5);
trace!("indy_register_transaction_parser_for_sp: entities: txn_type {}, parser {:?}, free {:?}",
txn_type, parser, free);
// Registration completion (success or failure) is reported through `cb`;
// the closure logs and forwards the prepared error code.
let res = CommandExecutor::instance()
.send(Command::Ledger(LedgerCommand::RegisterSPParser(
txn_type,
parser,
free,
Box::new(move |res| {
let res = prepare_result!(res);
trace!("indy_register_transaction_parser_for_sp: res: {:?}", res);
cb(command_handle, res)
}),
)));
let res = prepare_result!(res);
trace!("indy_register_transaction_parser_for_sp: <<< res: {:?}", res);
res
}
/// Parse transaction response to fetch metadata.
/// The important use case for this method is validation of Node's response freshness.
///
/// Distributed Ledgers can reply with outdated information for consequence read request after write.
/// To reduce pool load libindy sends read requests to one random node in the pool.
/// Consensus validation is performed based on validation of nodes multi signature for current ledger Merkle Trie root.
/// This multi signature contains information about the latest ledger's transaction ordering time and sequence number that this method returns.
///
/// If node that returned response for some reason is out of consensus and has outdated ledger
/// it can be caught by analysis of the returned latest ledger's transaction ordering time and sequence number.
///
/// There are two ways to filter outdated responses:
/// 1) based on "seqNo" - sender knows the sequence number of transaction that he consider as a fresh enough.
/// 2) based on "txnTime" - sender knows the timestamp that he consider as a fresh enough.
///
/// Note: response of GET_VALIDATOR_INFO request isn't supported
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// response: response of write or get request.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// response metadata.
/// {
/// "seqNo": Option<u64> - transaction sequence number,
/// "txnTime": Option<u64> - transaction ordering time,
/// "lastSeqNo": Option<u64> - the latest transaction seqNo for particular Node,
/// "lastTxnTime": Option<u64> - the latest transaction ordering time for particular Node
/// }
///
/// #Errors
/// Common*
/// Ledger*
#[no_mangle]
pub extern fn indy_get_response_metadata(command_handle: CommandHandle,
                                         response: *const c_char,
                                         cb: Option<extern fn(command_handle_: CommandHandle,
                                                              err: ErrorCode,
                                                              response_metadata: *const c_char)>) -> ErrorCode {
    trace!("indy_get_response_metadata: >>> response: {:?}", response);

    // Validate FFI inputs; unusable arguments are reported with the paired
    // CommonInvalidParamN error code.
    check_useful_c_str!(response, ErrorCode::CommonInvalidParam2);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam3);

    trace!("indy_get_response_metadata: entities >>> response: {:?}", response);

    // Queue the command; the boxed callback forwards the resulting metadata
    // JSON string back across the FFI boundary.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::GetResponseMetadata(
            response,
            boxed_callback_string!("indy_get_response_metadata", cb, command_handle)
        )));

    let res = prepare_result!(result);

    trace!("indy_get_response_metadata: <<< res: {:?}", res);

    res
}
/// Builds a LEDGERS_FREEZE request. Request to freeze list of ledgers.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
/// ledgers_ids: list of ledgers IDs for freezing ledgers (json format).
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_build_ledgers_freeze_request(command_handle: CommandHandle,
                                                submitter_did: *const c_char,
                                                ledgers_ids: *const c_char,
                                                cb: Option<extern fn(command_handle_: CommandHandle,
                                                                     err: ErrorCode,
                                                                     request_json: *const c_char)>) -> ErrorCode {
    trace!("indy_build_ledgers_freeze_request: entities >>> submitter_did: {:?}, ledgers_ids: {:?}", submitter_did, ledgers_ids);

    // Validate and convert FFI inputs: DID must be a valid `DidValue`, and
    // `ledgers_ids` must deserialize as a JSON array of u64 ledger ids.
    check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_json!(ledgers_ids, ErrorCode::CommonInvalidParam3, Vec<u64>);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam4);

    // Dispatch to the command executor; the boxed callback returns the built
    // request JSON to the caller.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::BuildLedgersFreezeRequest(
            submitter_did,
            ledgers_ids,
            boxed_callback_string!("indy_build_ledgers_freeze_request", cb, command_handle)
        )));

    let res = prepare_result!(result);

    trace!("indy_build_ledgers_freeze_request: <<< res: {:?}", res);

    res
}
/// Builds a GET_FROZEN_LEDGERS request. Request to get list of frozen ledgers.
/// frozen ledgers are defined by LEDGERS_FREEZE request.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
/// {
/// <ledger_id>: {
/// "ledger": String - Ledger root hash,
/// "state": String - State root hash,
/// "seq_no": u64 - the latest transaction seqNo for particular Node,
/// },
/// ...
/// }
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_build_get_frozen_ledgers_request(command_handle: CommandHandle,
                                                    submitter_did: *const c_char,
                                                    cb: Option<extern fn(command_handle_: CommandHandle,
                                                                         err: ErrorCode,
                                                                         request_json: *const c_char)>) -> ErrorCode {
    trace!("indy_build_get_frozen_ledgers_request: entities >>> submitter_did: {:?}", submitter_did);

    // Validate FFI inputs; the DID must parse as a valid `DidValue`.
    check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam3);

    // Dispatch; the boxed callback delivers the built request JSON.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::BuildGetFrozenLedgersRequest(
            submitter_did,
            boxed_callback_string!("indy_build_get_frozen_ledgers_request", cb, command_handle)
        )));

    let res = prepare_result!(result);

    trace!("indy_build_get_frozen_ledgers_request: <<< res: {:?}", res);

    res
}
/// Builds a AUTH_RULE request. Request to change authentication rules for a ledger transaction.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: Identifier (DID) of the transaction author as base58-encoded string.
/// Actual request sender may differ if Endorser is used (look at `indy_append_request_endorser`)
/// txn_type: ledger transaction alias or associated value.
/// action: type of an action.
/// Can be either "ADD" (to add a new rule) or "EDIT" (to edit an existing one).
/// field: transaction field.
/// old_value: (Optional) old value of a field, which can be changed to a new_value (mandatory for EDIT action).
/// new_value: (Optional) new value that can be used to fill the field.
/// constraint: set of constraints required for execution of an action in the following format:
/// {
/// constraint_id - <string> type of a constraint.
/// Can be either "ROLE" to specify final constraint or "AND"/"OR" to combine constraints.
/// role - <string> (optional) role of a user which satisfy to constrain.
/// sig_count - <u32> the number of signatures required to execution action.
/// need_to_be_owner - <bool> (optional) if user must be an owner of transaction (false by default).
///                             off_ledger_signature - <bool> (optional) allow signature of a DID unknown to the ledger (false by default).
/// metadata - <object> (optional) additional parameters of the constraint.
/// }
/// can be combined by
/// {
/// 'constraint_id': <"AND" or "OR">
/// 'auth_constraints': [<constraint_1>, <constraint_2>]
/// }
///
/// Default ledger auth rules: https://github.com/hyperledger/indy-node/blob/master/docs/source/auth_rules.md
///
/// More about AUTH_RULE request: https://github.com/hyperledger/indy-node/blob/master/docs/source/requests.md#auth_rule
///
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_build_auth_rule_request(command_handle: CommandHandle,
                                           submitter_did: *const c_char,
                                           txn_type: *const c_char,
                                           action: *const c_char,
                                           field: *const c_char,
                                           old_value: *const c_char,
                                           new_value: *const c_char,
                                           constraint: *const c_char,
                                           cb: Option<extern fn(command_handle_: CommandHandle,
                                                                err: ErrorCode,
                                                                request_json: *const c_char)>) -> ErrorCode {
    trace!("indy_build_auth_rule_request: >>> submitter_did: {:?}, txn_type: {:?}, action: {:?}, field: {:?}, \
            old_value: {:?}, new_value: {:?}, constraint: {:?}",
           submitter_did, txn_type, action, field, old_value, new_value, constraint);

    // Validate FFI inputs; note `old_value`/`new_value` are optional (nullable)
    // and `constraint` must deserialize into the `Constraint` JSON schema.
    check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_c_str!(txn_type, ErrorCode::CommonInvalidParam3);
    check_useful_c_str!(action, ErrorCode::CommonInvalidParam4);
    check_useful_c_str!(field, ErrorCode::CommonInvalidParam5);
    check_useful_opt_c_str!(old_value, ErrorCode::CommonInvalidParam6);
    check_useful_opt_c_str!(new_value, ErrorCode::CommonInvalidParam7);
    check_useful_json!(constraint, ErrorCode::CommonInvalidParam8, Constraint);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam9);

    trace!("indy_build_auth_rule_request: entities >>> submitter_did: {:?}, txn_type: {:?}, action: {:?}, field: {:?}, \
            old_value: {:?}, new_value: {:?}, constraint: {:?}",
           submitter_did, txn_type, action, field, old_value, new_value, constraint);

    // Dispatch; the boxed callback delivers the built AUTH_RULE request JSON.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::BuildAuthRuleRequest(
            submitter_did,
            txn_type,
            action,
            field,
            old_value,
            new_value,
            constraint,
            boxed_callback_string!("indy_build_auth_rule_request", cb, command_handle)
        )));

    let res = prepare_result!(result);

    trace!("indy_build_auth_rule_request: <<< res: {:?}", res);

    res
}
/// Builds a AUTH_RULES request. Request to change multiple authentication rules for a ledger transaction.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: Identifier (DID) of the transaction author as base58-encoded string.
/// Actual request sender may differ if Endorser is used (look at `indy_append_request_endorser`)
/// rules: a list of auth rules: [
/// {
/// "auth_type": ledger transaction alias or associated value,
/// "auth_action": type of an action,
/// "field": transaction field,
/// "old_value": (Optional) old value of a field, which can be changed to a new_value (mandatory for EDIT action),
/// "new_value": (Optional) new value that can be used to fill the field,
/// "constraint": set of constraints required for execution of an action in the format described above for `indy_build_auth_rule_request` function.
/// },
/// ...
/// ]
///
/// Default ledger auth rules: https://github.com/hyperledger/indy-node/blob/master/docs/source/auth_rules.md
///
/// More about AUTH_RULES request: https://github.com/hyperledger/indy-node/blob/master/docs/source/requests.md#auth_rules
///
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_build_auth_rules_request(command_handle: CommandHandle,
                                            submitter_did: *const c_char,
                                            rules: *const c_char,
                                            cb: Option<extern fn(command_handle_: CommandHandle,
                                                                 err: ErrorCode,
                                                                 request_json: *const c_char)>) -> ErrorCode {
    trace!("indy_build_auth_rules_request: >>> submitter_did: {:?}, rules: {:?}", submitter_did, rules);

    // Validate FFI inputs; `rules` must deserialize into the `AuthRules` schema.
    check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_json!(rules, ErrorCode::CommonInvalidParam3, AuthRules);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam4);

    // A syntactically valid but empty rules list is rejected up front rather
    // than building a meaningless request.
    if rules.is_empty() {
        return err_msg(IndyErrorKind::InvalidStructure, "Empty list of Auth Rules has been passed").into();
    }

    trace!("indy_build_auth_rules_request: entities >>> submitter_did: {:?}, rules: {:?}", submitter_did, rules);

    // Dispatch; the boxed callback delivers the built AUTH_RULES request JSON.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::BuildAuthRulesRequest(
            submitter_did,
            rules,
            boxed_callback_string!("indy_build_auth_rules_request", cb, command_handle)
        )));

    let res = prepare_result!(result);

    trace!("indy_build_auth_rules_request: <<< res: {:?}", res);

    res
}
/// Builds a GET_AUTH_RULE request. Request to get authentication rules for ledger transactions.
///
/// NOTE: Either none or all transaction related parameters must be specified (`old_value` can be skipped for `ADD` action).
/// * none - to get all authentication rules for all ledger transactions
/// * all - to get authentication rules for specific action (`old_value` can be skipped for `ADD` action)
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
/// txn_type: (Optional) target ledger transaction alias or associated value.
/// action: (Optional) target action type. Can be either "ADD" or "EDIT".
/// field: (Optional) target transaction field.
/// old_value: (Optional) old value of field, which can be changed to a new_value (mandatory for EDIT action).
/// new_value: (Optional) new value that can be used to fill the field.
///
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_build_get_auth_rule_request(command_handle: CommandHandle,
                                               submitter_did: *const c_char,
                                               txn_type: *const c_char,
                                               action: *const c_char,
                                               field: *const c_char,
                                               old_value: *const c_char,
                                               new_value: *const c_char,
                                               cb: Option<extern fn(command_handle_: CommandHandle,
                                                                    err: ErrorCode,
                                                                    request_json: *const c_char)>) -> ErrorCode {
    trace!("indy_build_get_auth_rule_request: >>> submitter_did: {:?}, txn_type: {:?}, action: {:?}, field: {:?}, \
            old_value: {:?}, new_value: {:?}",
           submitter_did, txn_type, action, field, old_value, new_value);

    // All filter parameters are optional here (unlike `indy_build_auth_rule_request`):
    // passing them all as null requests every auth rule on the ledger.
    check_useful_validatable_opt_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_opt_c_str!(txn_type, ErrorCode::CommonInvalidParam3);
    check_useful_opt_c_str!(action, ErrorCode::CommonInvalidParam4);
    check_useful_opt_c_str!(field, ErrorCode::CommonInvalidParam5);
    check_useful_opt_c_str!(old_value, ErrorCode::CommonInvalidParam6);
    check_useful_opt_c_str!(new_value, ErrorCode::CommonInvalidParam7);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam8);

    trace!("indy_build_get_auth_rule_request: entities >>> submitter_did: {:?}, txn_type: {:?}, action: {:?}, field: {:?}, \
            old_value: {:?}, new_value: {:?}",
           submitter_did, txn_type, action, field, old_value, new_value);

    // Dispatch; the boxed callback delivers the built GET_AUTH_RULE request JSON.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(LedgerCommand::BuildGetAuthRuleRequest(
            submitter_did,
            txn_type,
            action,
            field,
            old_value,
            new_value,
            boxed_callback_string!("indy_build_get_auth_rule_request", cb, command_handle)
        )));

    let res = prepare_result!(result);

    trace!("indy_build_get_auth_rule_request: <<< res: {:?}", res);

    res
}
/// Builds a TXN_AUTHR_AGRMT request. Request to add a new version of Transaction Author Agreement to the ledger.
///
/// EXPERIMENTAL
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: Identifier (DID) of the transaction author as base58-encoded string.
/// Actual request sender may differ if Endorser is used (look at `indy_append_request_endorser`)
/// text: (Optional) the content of the TAA.
/// Mandatory in case of adding a new TAA. An existing TAA text can not be changed.
/// for Indy Node version <= 1.12.0:
/// Use empty string to reset TAA on the ledger
/// for Indy Node version > 1.12.0
/// Should be omitted in case of updating an existing TAA (setting `retirement_ts`)
/// version: a version of the TAA (unique UTF-8 string).
/// ratification_ts: (Optional) the date (timestamp) of TAA ratification by network government. (-1 to omit)
/// for Indy Node version <= 1.12.0:
/// Must be omitted
/// for Indy Node version > 1.12.0:
/// Must be specified in case of adding a new TAA
/// Can be omitted in case of updating an existing TAA
/// retirement_ts: (Optional) the date (timestamp) of TAA retirement. (-1 to omit)
/// for Indy Node version <= 1.12.0:
/// Must be omitted
/// for Indy Node version > 1.12.0:
/// Must be omitted in case of adding a new (latest) TAA.
/// Should be used for updating (deactivating) non-latest TAA on the ledger.
///
/// Note: Use `indy_build_disable_all_txn_author_agreements_request` to disable all TAA's on the ledger.
///
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_build_txn_author_agreement_request(command_handle: CommandHandle,
                                                      submitter_did: *const c_char,
                                                      text: *const c_char,
                                                      version: *const c_char,
                                                      ratification_ts: i64,
                                                      retirement_ts: i64,
                                                      cb: Option<extern fn(command_handle_: CommandHandle,
                                                                           err: ErrorCode,
                                                                           request_json: *const c_char)>) -> ErrorCode {
    trace!("indy_build_txn_author_agreement_request: >>> submitter_did: {:?}, text: {:?}, version: {:?}, ratification_ts {:?}, retirement_ts {:?}",
           submitter_did, text, version, ratification_ts, retirement_ts);

    // Validate FFI inputs. The two timestamps use -1 as the "omitted" sentinel;
    // `check_useful_opt_u64!` converts them to optional values accordingly.
    check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_opt_c_str!(text, ErrorCode::CommonInvalidParam3);
    check_useful_c_str!(version, ErrorCode::CommonInvalidParam4);
    check_useful_opt_u64!(ratification_ts, ErrorCode::CommonInvalidParam5);
    check_useful_opt_u64!(retirement_ts, ErrorCode::CommonInvalidParam6);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam7);

    trace!("indy_build_txn_author_agreement_request: entities >>> submitter_did: {:?}, text: {:?}, version: {:?}, ratification_ts {:?}, retirement_ts {:?}",
           submitter_did, text, version, ratification_ts, retirement_ts);

    // Dispatch; the boxed callback delivers the built TXN_AUTHR_AGRMT request JSON.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(
            LedgerCommand::BuildTxnAuthorAgreementRequest(
                submitter_did,
                text,
                version,
                ratification_ts,
                retirement_ts,
                boxed_callback_string!("indy_build_txn_author_agreement_request", cb, command_handle)
            )));

    let res = prepare_result!(result);

    trace!("indy_build_txn_author_agreement_request: <<< res: {:?}", res);

    res
}
/// Builds a DISABLE_ALL_TXN_AUTHR_AGRMTS request. Request to disable all Transaction Author Agreement on the ledger.
///
/// EXPERIMENTAL
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: Identifier (DID) of the transaction author as base58-encoded string.
/// Actual request sender may differ if Endorser is used (look at `indy_append_request_endorser`)
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_build_disable_all_txn_author_agreements_request(command_handle: CommandHandle,
                                                                   submitter_did: *const c_char,
                                                                   cb: Option<extern fn(command_handle_: CommandHandle,
                                                                                        err: ErrorCode,
                                                                                        request_json: *const c_char)>) -> ErrorCode {
    trace!("indy_build_disable_all_txn_author_agreements_request: >>> submitter_did: {:?}", submitter_did);

    // Validate FFI inputs; the DID must parse as a valid `DidValue`.
    check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    // BUGFIX: `cb` is the 3rd parameter of this function, so a missing callback
    // must be reported as CommonInvalidParam3. The previous code reported
    // CommonInvalidParam7, apparently copy-pasted from a function with more
    // parameters, giving callers a misleading error code.
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam3);

    trace!("indy_build_disable_all_txn_author_agreements_request: entities >>> submitter_did: {:?}", submitter_did);

    // Dispatch; the boxed callback delivers the built request JSON.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(
            LedgerCommand::BuildDisableAllTxnAuthorAgreementsRequest(
                submitter_did,
                boxed_callback_string!("indy_build_disable_all_txn_author_agreements_request", cb, command_handle)
            )));

    let res = prepare_result!(result);

    trace!("indy_build_disable_all_txn_author_agreements_request: <<< res: {:?}", res);

    res
}
/// Builds a GET_TXN_AUTHR_AGRMT request. Request to get a specific Transaction Author Agreement from the ledger.
///
/// EXPERIMENTAL
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
/// data: (Optional) specifies a condition for getting specific TAA.
/// Contains 3 mutually exclusive optional fields:
/// {
/// hash: Optional<str> - hash of requested TAA,
/// version: Optional<str> - version of requested TAA.
/// timestamp: Optional<u64> - ledger will return TAA valid at requested timestamp.
/// }
/// Null data or empty JSON are acceptable here. In this case, ledger will return the latest version of TAA.
///
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_build_get_txn_author_agreement_request(command_handle: CommandHandle,
                                                          submitter_did: *const c_char,
                                                          data: *const c_char,
                                                          cb: Option<extern fn(command_handle_: CommandHandle,
                                                                               err: ErrorCode,
                                                                               request_json: *const c_char)>) -> ErrorCode {
    // FIX: removed a stray trailing '?' from the entry trace format string
    // ("data: {:?}?"), which produced a garbled log line.
    trace!("indy_build_get_txn_author_agreement_request: >>> submitter_did: {:?}, data: {:?}", submitter_did, data);

    // Validate FFI inputs. `data` is optional and, when present, must
    // deserialize into `GetTxnAuthorAgreementData` (hash/version/timestamp).
    check_useful_validatable_opt_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_opt_validatable_json!(data, ErrorCode::CommonInvalidParam3, GetTxnAuthorAgreementData);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam4);

    trace!("indy_build_get_txn_author_agreement_request: entities >>> submitter_did: {:?}, data: {:?}", submitter_did, data);

    // Dispatch; the boxed callback delivers the built GET_TXN_AUTHR_AGRMT request JSON.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(
            LedgerCommand::BuildGetTxnAuthorAgreementRequest(
                submitter_did,
                data,
                boxed_callback_string!("indy_build_get_txn_author_agreement_request", cb, command_handle)
            )));

    let res = prepare_result!(result);

    trace!("indy_build_get_txn_author_agreement_request: <<< res: {:?}", res);

    res
}
/// Builds a SET_TXN_AUTHR_AGRMT_AML request. Request to add a new list of acceptance mechanisms for transaction author agreement.
/// Acceptance Mechanism is a description of the ways how the user may accept a transaction author agreement.
///
/// EXPERIMENTAL
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: Identifier (DID) of the transaction author as base58-encoded string.
/// Actual request sender may differ if Endorser is used (look at `indy_append_request_endorser`)
/// aml: a set of new acceptance mechanisms:
/// {
/// “<acceptance mechanism label 1>”: { acceptance mechanism description 1},
/// “<acceptance mechanism label 2>”: { acceptance mechanism description 2},
/// ...
/// }
/// version: a version of new acceptance mechanisms. (Note: unique on the Ledger)
/// aml_context: (Optional) common context information about acceptance mechanisms (may be a URL to external resource).
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_build_acceptance_mechanisms_request(command_handle: CommandHandle,
                                                       submitter_did: *const c_char,
                                                       aml: *const c_char,
                                                       version: *const c_char,
                                                       aml_context: *const c_char,
                                                       cb: Option<extern fn(command_handle_: CommandHandle,
                                                                            err: ErrorCode,
                                                                            request_json: *const c_char)>) -> ErrorCode {
    trace!("indy_build_acceptance_mechanisms_request: >>> submitter_did: {:?}, aml: {:?}, version: {:?}, aml_context: {:?}",
           submitter_did, aml, version, aml_context);

    // Validate FFI inputs; `aml` must deserialize (and validate) as the
    // `AcceptanceMechanisms` map, `aml_context` is optional.
    check_useful_validatable_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_validatable_json!(aml, ErrorCode::CommonInvalidParam3, AcceptanceMechanisms);
    check_useful_c_str!(version, ErrorCode::CommonInvalidParam4);
    check_useful_opt_c_str!(aml_context, ErrorCode::CommonInvalidParam5);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam6);

    trace!("indy_build_acceptance_mechanisms_request: entities >>> submitter_did: {:?}, aml: {:?}, version: {:?}, aml_context: {:?}",
           submitter_did, aml, version, aml_context);

    // Dispatch; the boxed callback delivers the built SET_TXN_AUTHR_AGRMT_AML request JSON.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(
            LedgerCommand::BuildAcceptanceMechanismRequests(
                submitter_did,
                aml,
                version,
                aml_context,
                boxed_callback_string!("indy_build_acceptance_mechanisms_request", cb, command_handle)
            )));

    let res = prepare_result!(result);

    trace!("indy_build_acceptance_mechanisms_request: <<< res: {:?}", res);

    res
}
/// Builds a GET_TXN_AUTHR_AGRMT_AML request. Request to get a list of acceptance mechanisms from the ledger
/// valid for specified time or the latest one.
///
/// EXPERIMENTAL
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
/// timestamp: i64 - time to get an active acceptance mechanisms. Pass -1 to get the latest one.
/// version: (Optional) version of acceptance mechanisms.
/// cb: Callback that takes command result as parameter.
///
/// NOTE: timestamp and version cannot be specified together.
///
/// #Returns
/// Request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_build_get_acceptance_mechanisms_request(command_handle: CommandHandle,
                                                           submitter_did: *const c_char,
                                                           timestamp: i64,
                                                           version: *const c_char,
                                                           cb: Option<extern fn(command_handle_: CommandHandle,
                                                                                err: ErrorCode,
                                                                                request_json: *const c_char)>) -> ErrorCode {
    trace!("indy_build_get_acceptance_mechanisms_request: >>> submitter_did: {:?}, timestamp: {:?}, version: {:?}", submitter_did, timestamp, version);

    // `timestamp` (param 3) is a plain i64 and needs no pointer validation,
    // hence the jump from Param2 to Param4 in the error codes below.
    check_useful_validatable_opt_string!(submitter_did, ErrorCode::CommonInvalidParam2, DidValue);
    check_useful_opt_c_str!(version, ErrorCode::CommonInvalidParam4);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam5);

    // -1 is the documented sentinel for "latest": map it to None, otherwise
    // reinterpret the non-negative timestamp as u64.
    let timestamp = if timestamp != -1 { Some(timestamp as u64) } else { None };

    trace!("indy_build_get_acceptance_mechanisms_request: entities >>> submitter_did: {:?}, timestamp: {:?}, version: {:?}", submitter_did, timestamp, version);

    // Dispatch; the boxed callback delivers the built GET_TXN_AUTHR_AGRMT_AML request JSON.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(
            LedgerCommand::BuildGetAcceptanceMechanismsRequest(
                submitter_did,
                timestamp,
                version,
                boxed_callback_string!("indy_build_get_acceptance_mechanisms_request", cb, command_handle)
            )));

    let res = prepare_result!(result);

    trace!("indy_build_get_acceptance_mechanisms_request: <<< res: {:?}", res);

    res
}
/// Append transaction author agreement acceptance data to a request.
/// This function should be called before signing and sending a request
/// if there is any transaction author agreement set on the Ledger.
///
/// EXPERIMENTAL
///
/// This function may calculate digest by itself or consume it as a parameter.
/// If all text, version and taa_digest parameters are specified, a check integrity of them will be done.
///
/// #Params
/// command_handle: command handle to map callback to caller context.
/// request_json: original request data json.
/// text and version - (optional) raw data about TAA from ledger.
/// These parameters should be passed together.
/// These parameters are required if taa_digest parameter is omitted.
/// taa_digest - (optional) digest on text and version.
/// Digest is sha256 hash calculated on concatenated strings: version || text.
/// This parameter is required if text and version parameters are omitted.
/// mechanism - mechanism how user has accepted the TAA
/// time - UTC timestamp when user has accepted the TAA. Note that the time portion will be discarded to avoid a privacy risk.
/// cb: Callback that takes command result as parameter.
///
/// #Returns
/// Updated request result as json.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_append_txn_author_agreement_acceptance_to_request(command_handle: CommandHandle,
                                                                     request_json: *const c_char,
                                                                     text: *const c_char,
                                                                     version: *const c_char,
                                                                     taa_digest: *const c_char,
                                                                     mechanism: *const c_char,
                                                                     time: u64,
                                                                     cb: Option<extern fn(command_handle_: CommandHandle,
                                                                                          err: ErrorCode,
                                                                                          request_with_meta_json: *const c_char)>) -> ErrorCode {
    trace!("indy_append_txn_author_agreement_acceptance_to_request: >>> request_json: {:?}, text: {:?}, version: {:?}, taa_digest: {:?}, \
            mechanism: {:?}, time: {:?}",
           request_json, text, version, taa_digest, mechanism, time);

    // Validate FFI inputs. `text`/`version`/`taa_digest` are individually
    // optional (either digest, or text+version, must be supplied per the docs).
    // `time` (param 7) is a plain u64 and needs no validation, hence the jump
    // to CommonInvalidParam8 for the callback.
    check_useful_c_str!(request_json, ErrorCode::CommonInvalidParam2);
    check_useful_opt_c_str!(text, ErrorCode::CommonInvalidParam3);
    check_useful_opt_c_str!(version, ErrorCode::CommonInvalidParam4);
    check_useful_opt_c_str!(taa_digest, ErrorCode::CommonInvalidParam5);
    check_useful_c_str!(mechanism, ErrorCode::CommonInvalidParam6);
    check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam8);

    trace!("indy_append_txn_author_agreement_acceptance_to_request: entities >>> request_json: {:?}, text: {:?}, version: {:?}, taa_digest: {:?}, \
            mechanism: {:?}, time: {:?}",
           request_json, text, version, taa_digest, mechanism, time);

    // Dispatch; the boxed callback delivers the request JSON with the TAA
    // acceptance block appended.
    let result = CommandExecutor::instance()
        .send(Command::Ledger(
            LedgerCommand::AppendTxnAuthorAgreementAcceptanceToRequest(
                request_json,
                text,
                version,
                taa_digest,
                mechanism,
                time,
                boxed_callback_string!("indy_append_txn_author_agreement_acceptance_to_request", cb, command_handle)
            )));

    let res = prepare_result!(result);

    trace!("indy_append_txn_author_agreement_acceptance_to_request: <<< res: {:?}", res);

    res
}
/// Append Endorser to an existing request.
///
/// An author of request still is a `DID` used as a `submitter_did` parameter for the building of the request.
/// But it is expecting that the transaction will be sent by the specified Endorser.
///
/// Note: Both Transaction Author and Endorser must sign output request after that.
///
/// More about Transaction Endorser: https://github.com/hyperledger/indy-node/blob/master/design/transaction_endorser.md
/// https://github.com/hyperledger/indy-sdk/blob/master/docs/configuration.md
///
/// #Params
/// request_json: original request
/// endorser_did: DID of the Endorser that will submit the transaction.
/// The Endorser's DID must be present on the ledger.
/// cb: Callback that takes command result as parameter.
/// The command result is a request JSON with Endorser field appended.
///
/// #Errors
/// Common*
#[no_mangle]
pub extern fn indy_append_request_endorser(command_handle: CommandHandle,
request_json: *const c_char,
endorser_did: *const c_char,
cb: Option<extern fn(command_handle_: CommandHandle,
err: ErrorCode,
out_request_json: *const c_char)>) -> ErrorCode {
trace!("indy_append_request_endorser: >>> request_json: {:?}, endorser_did: {:?}",
request_json, endorser_did);
check_useful_c_str!(request_json, ErrorCode::CommonInvalidParam2);
check_useful_validatable_string!(endorser_did, ErrorCode::CommonInvalidParam3, DidValue);
check_useful_c_callback!(cb, ErrorCode::CommonInvalidParam4);
trace!("indy_append_request_endorser: entities >>> request_json: {:?},endorser_did: {:?}", request_json, endorser_did);
let result = CommandExecutor::instance()
.send(Command::Ledger(
LedgerCommand::AppendRequestEndorser(
request_json,
endorser_did,
boxed_callback_string!("indy_append_request_endorser", cb, command_handle)
)));
let res = prepare_result!(result);
trace!("indy_append_request_endorser: <<< res: {:?}", res);
res
} | 46.648703 | 166 | 0.612939 |
67716c3c5d692d5b8828a00a1e3a6efa749a2418 | 5,374 | use super::id_generation::TicketId;
use super::recap::Status;
use chrono::{DateTime, Utc};
use std::collections::HashMap;
/// We know that id and creation time will never be there before a ticket is saved,
/// while they will always be populated after `save` has been called.
///
/// The approach we followed in the previous koan has its limitations: every time we
/// access `id` and `created_at` we need to keep track of the "life stage" of our ticket.
/// Has it been saved yet? Is it safe to unwrap those `Option`s?
/// That is unnecessary cognitive load and leads to errors down the line,
/// when writing new code or refactoring existing functionality.
///
/// We can do better.
/// We can use types to better model our domain and constrain the behaviour of our code.
///
/// Before `TicketStore::save` is called, we are dealing with a `TicketDraft`.
/// No `created_at`, no `id`, no `status`.
/// On the other side, `TicketStore::get` will return a `Ticket`, with a `created_at` and
/// an `id`.
///
/// There will be no way to create a `Ticket` without passing through the store:
/// we will enforce `save` as the only way to produce a `Ticket` from a `TicketDraft`.
/// This will ensure as well that all tickets start in a `ToDo` status.
///
/// Less room for errors, less ambiguity, you can understand the domain constraints
/// by looking at the signatures of the functions in our code.
///
/// On the topic of type-driven development, checkout:
/// - https://fsharpforfunandprofit.com/series/designing-with-types.html
/// - https://lexi-lambda.github.io/blog/2019/11/05/parse-don-t-validate/
/// - https://www.youtube.com/watch?v=PLFl95c-IiU
///
#[derive(Debug, Clone, PartialEq)]
pub struct TicketDraft {
    title: String,       // non-empty, at most 50 characters (enforced by `create_ticket_draft`)
    description: String, // at most 3000 characters (enforced by `create_ticket_draft`)
}
/// A persisted ticket. Unlike `TicketDraft`, it always carries an id,
/// a status and a creation time — all assigned by `TicketStore::save`,
/// which is the only way to produce a `Ticket`.
#[derive(Debug, Clone, PartialEq)]
pub struct Ticket {
    id: TicketId,              // assigned by the store at save time
    title: String,
    description: String,
    status: Status,            // starts as `Status::ToDo` when saved
    created_at: DateTime<Utc>, // set by the store at save time
}
/// In-memory ticket repository; also owns id generation.
struct TicketStore {
    data: HashMap<TicketId, Ticket>, // tickets indexed by their id
    current_id: TicketId,            // last id handed out; 0 means none yet
}
impl TicketStore {
pub fn new() -> TicketStore {
TicketStore {
data: HashMap::new(),
current_id: 0,
}
}
pub fn save(&mut self, draft: TicketDraft) -> TicketId {
let id = self.generate_id();
// We can use the "raw" constructor for `Ticket` here because the
// store is defined in the same module of `Ticket`.
// If you are importing `Ticket` from another module,
// `TicketStore::get` will indeed be the only way to get your hands on
// an instance of `Ticket`.
// This enforces our desired invariant: saving a draft in the store
// is the only way to "create" a `Ticket`.
let ticket = Ticket {
id,
title: draft.title,
description: draft.description,
status: Status::ToDo,
created_at: Utc::now(),
};
self.data.insert(id, ticket);
id
}
pub fn get(&self, id: &TicketId) -> Option<&Ticket> {
self.data.get(id)
}
fn generate_id(&mut self) -> TicketId {
self.current_id += 1;
self.current_id
}
}
impl TicketDraft {
pub fn title(&self) -> &String {
&self.title
}
pub fn description(&self) -> &String {
&self.description
}
}
impl Ticket {
pub fn title(&self) -> &String {
&self.title
}
pub fn description(&self) -> &String {
&self.description
}
pub fn status(&self) -> &Status {
&self.status
}
pub fn created_at(&self) -> &DateTime<Utc> {
&self.created_at
}
pub fn id(&self) -> &TicketId {
&self.id
}
}
pub fn create_ticket_draft(title: String, description: String) -> TicketDraft {
if title.is_empty() {
panic!("Title cannot be empty!");
}
if title.len() > 50 {
panic!("A title cannot be longer than 50 characters!");
}
if description.len() > 3000 {
panic!("A description cannot be longer than 3000 characters!");
}
TicketDraft { title, description }
}
#[cfg(test)]
mod tests {
use super::*;
use fake::{Fake, Faker};
#[test]
fn a_ticket_with_a_home() {
let draft = generate_ticket_draft();
let mut store = TicketStore::new();
let ticket_id = store.save(draft.clone());
let retrieved_ticket = store.get(&ticket_id).unwrap();
assert_eq!(&ticket_id, retrieved_ticket.id());
assert_eq!(&draft.title, retrieved_ticket.title());
assert_eq!(&draft.description, retrieved_ticket.description());
assert_eq!(&Status::ToDo, retrieved_ticket.status());
}
#[test]
fn a_missing_ticket() {
let ticket_store = TicketStore::new();
let ticket_id = Faker.fake();
assert_eq!(ticket_store.get(&ticket_id), None);
}
#[test]
fn id_generation_is_monotonic() {
let n_tickets = 100;
let mut store = TicketStore::new();
for expected_id in 1..n_tickets {
let draft = generate_ticket_draft();
let ticket_id = store.save(draft);
assert_eq!(expected_id, ticket_id);
}
}
fn generate_ticket_draft() -> TicketDraft {
let description = (0..3000).fake();
let title = (1..50).fake();
create_ticket_draft(title, description)
}
}
| 29.690608 | 89 | 0.619836 |
676a7e0dee03bed51c7b2bde0690b6308950e900 | 17,427 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Paginator for [`DescribeCanaries`](crate::operation::DescribeCanaries)
pub struct DescribeCanariesPaginator<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<crate::client::Handle<C, M, R>>,
builder: crate::input::describe_canaries_input::Builder,
}
impl<C, M, R> DescribeCanariesPaginator<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Create a new paginator-wrapper
pub(crate) fn new(
handle: std::sync::Arc<crate::client::Handle<C, M, R>>,
builder: crate::input::describe_canaries_input::Builder,
) -> Self {
Self { handle, builder }
}
/// Set the page size
///
/// _Note: this method will override any previously set value for `max_results`_
pub fn page_size(mut self, limit: i32) -> Self {
self.builder.max_results = Some(limit);
self
}
/// Create the pagination stream
///
/// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::output::DescribeCanariesOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeCanariesError>,
>,
> + Unpin
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeCanariesInputOperationOutputAlias,
crate::output::DescribeCanariesOutput,
crate::error::DescribeCanariesError,
crate::input::DescribeCanariesInputOperationRetryAlias,
>,
{
// Move individual fields out of self for the borrow checker
let builder = self.builder;
let handle = self.handle;
aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
Box::pin(async move {
// Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
let mut input = match builder.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
}) {
Ok(input) => input,
Err(e) => {
let _ = tx.send(Err(e)).await;
return;
}
};
loop {
let op = match input.make_operation(&handle.conf).await.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
}) {
Ok(op) => op,
Err(e) => {
let _ = tx.send(Err(e)).await;
return;
}
};
let resp = handle.client.call(op).await;
// If the input member is None or it was an error
let done = match resp {
Ok(ref resp) => {
let new_token = crate::lens::reflens_structure_crate_output_describe_canaries_output_next_token(resp);
if new_token == input.next_token.as_ref() {
let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
return;
}
input.next_token = new_token.cloned();
input.next_token.as_deref().unwrap_or_default().is_empty()
}
Err(_) => true,
};
if tx.send(resp).await.is_err() {
// receiving end was dropped
return;
}
if done {
return;
}
}
})
})
}
}
/// Paginator for [`DescribeCanariesLastRun`](crate::operation::DescribeCanariesLastRun)
pub struct DescribeCanariesLastRunPaginator<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<crate::client::Handle<C, M, R>>,
builder: crate::input::describe_canaries_last_run_input::Builder,
}
impl<C, M, R> DescribeCanariesLastRunPaginator<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Create a new paginator-wrapper
pub(crate) fn new(
handle: std::sync::Arc<crate::client::Handle<C, M, R>>,
builder: crate::input::describe_canaries_last_run_input::Builder,
) -> Self {
Self { handle, builder }
}
/// Set the page size
///
/// _Note: this method will override any previously set value for `max_results`_
pub fn page_size(mut self, limit: i32) -> Self {
self.builder.max_results = Some(limit);
self
}
/// Create the pagination stream
///
/// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::output::DescribeCanariesLastRunOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeCanariesLastRunError>,
>,
> + Unpin
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeCanariesLastRunInputOperationOutputAlias,
crate::output::DescribeCanariesLastRunOutput,
crate::error::DescribeCanariesLastRunError,
crate::input::DescribeCanariesLastRunInputOperationRetryAlias,
>,
{
// Move individual fields out of self for the borrow checker
let builder = self.builder;
let handle = self.handle;
aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
Box::pin(async move {
// Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
let mut input = match builder.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
}) {
Ok(input) => input,
Err(e) => {
let _ = tx.send(Err(e)).await;
return;
}
};
loop {
let op = match input.make_operation(&handle.conf).await.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
}) {
Ok(op) => op,
Err(e) => {
let _ = tx.send(Err(e)).await;
return;
}
};
let resp = handle.client.call(op).await;
// If the input member is None or it was an error
let done = match resp {
Ok(ref resp) => {
let new_token = crate::lens::reflens_structure_crate_output_describe_canaries_last_run_output_next_token(resp);
if new_token == input.next_token.as_ref() {
let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
return;
}
input.next_token = new_token.cloned();
input.next_token.as_deref().unwrap_or_default().is_empty()
}
Err(_) => true,
};
if tx.send(resp).await.is_err() {
// receiving end was dropped
return;
}
if done {
return;
}
}
})
})
}
}
/// Paginator for [`DescribeRuntimeVersions`](crate::operation::DescribeRuntimeVersions)
pub struct DescribeRuntimeVersionsPaginator<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<crate::client::Handle<C, M, R>>,
builder: crate::input::describe_runtime_versions_input::Builder,
}
impl<C, M, R> DescribeRuntimeVersionsPaginator<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Create a new paginator-wrapper
pub(crate) fn new(
handle: std::sync::Arc<crate::client::Handle<C, M, R>>,
builder: crate::input::describe_runtime_versions_input::Builder,
) -> Self {
Self { handle, builder }
}
/// Set the page size
///
/// _Note: this method will override any previously set value for `max_results`_
pub fn page_size(mut self, limit: i32) -> Self {
self.builder.max_results = Some(limit);
self
}
/// Create the pagination stream
///
/// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::output::DescribeRuntimeVersionsOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeRuntimeVersionsError>,
>,
> + Unpin
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeRuntimeVersionsInputOperationOutputAlias,
crate::output::DescribeRuntimeVersionsOutput,
crate::error::DescribeRuntimeVersionsError,
crate::input::DescribeRuntimeVersionsInputOperationRetryAlias,
>,
{
// Move individual fields out of self for the borrow checker
let builder = self.builder;
let handle = self.handle;
aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
Box::pin(async move {
// Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
let mut input = match builder.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
}) {
Ok(input) => input,
Err(e) => {
let _ = tx.send(Err(e)).await;
return;
}
};
loop {
let op = match input.make_operation(&handle.conf).await.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
}) {
Ok(op) => op,
Err(e) => {
let _ = tx.send(Err(e)).await;
return;
}
};
let resp = handle.client.call(op).await;
// If the input member is None or it was an error
let done = match resp {
Ok(ref resp) => {
let new_token = crate::lens::reflens_structure_crate_output_describe_runtime_versions_output_next_token(resp);
if new_token == input.next_token.as_ref() {
let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
return;
}
input.next_token = new_token.cloned();
input.next_token.as_deref().unwrap_or_default().is_empty()
}
Err(_) => true,
};
if tx.send(resp).await.is_err() {
// receiving end was dropped
return;
}
if done {
return;
}
}
})
})
}
}
/// Paginator for [`GetCanaryRuns`](crate::operation::GetCanaryRuns)
pub struct GetCanaryRunsPaginator<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<crate::client::Handle<C, M, R>>,
builder: crate::input::get_canary_runs_input::Builder,
}
impl<C, M, R> GetCanaryRunsPaginator<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Create a new paginator-wrapper
pub(crate) fn new(
handle: std::sync::Arc<crate::client::Handle<C, M, R>>,
builder: crate::input::get_canary_runs_input::Builder,
) -> Self {
Self { handle, builder }
}
/// Set the page size
///
/// _Note: this method will override any previously set value for `max_results`_
pub fn page_size(mut self, limit: i32) -> Self {
self.builder.max_results = Some(limit);
self
}
/// Create the pagination stream
///
/// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::output::GetCanaryRunsOutput,
aws_smithy_http::result::SdkError<crate::error::GetCanaryRunsError>,
>,
> + Unpin
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::GetCanaryRunsInputOperationOutputAlias,
crate::output::GetCanaryRunsOutput,
crate::error::GetCanaryRunsError,
crate::input::GetCanaryRunsInputOperationRetryAlias,
>,
{
// Move individual fields out of self for the borrow checker
let builder = self.builder;
let handle = self.handle;
aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
Box::pin(async move {
// Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
let mut input = match builder.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
}) {
Ok(input) => input,
Err(e) => {
let _ = tx.send(Err(e)).await;
return;
}
};
loop {
let op = match input.make_operation(&handle.conf).await.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
}) {
Ok(op) => op,
Err(e) => {
let _ = tx.send(Err(e)).await;
return;
}
};
let resp = handle.client.call(op).await;
// If the input member is None or it was an error
let done = match resp {
Ok(ref resp) => {
let new_token = crate::lens::reflens_structure_crate_output_get_canary_runs_output_next_token(resp);
if new_token == input.next_token.as_ref() {
let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
return;
}
input.next_token = new_token.cloned();
input.next_token.as_deref().unwrap_or_default().is_empty()
}
Err(_) => true,
};
if tx.send(resp).await.is_err() {
// receiving end was dropped
return;
}
if done {
return;
}
}
})
})
}
}
| 42.196126 | 214 | 0.526195 |
4806c20c3a06f44eaec8edc20f0e36db452ec44a | 51 | pub use trigger::Trigger;
mod trigger;
mod watch;
| 10.2 | 25 | 0.745098 |
8715c74f03d604d16fd3f6b0eacf8ba4b9f41070 | 3,701 | use crate::desktop_context::DesktopContext;
use crate::user_window_events::UserWindowEvent;
use dioxus_core::*;
use std::{
collections::{HashMap, VecDeque},
sync::atomic::AtomicBool,
sync::{Arc, RwLock},
};
use wry::{
self,
application::{event_loop::ControlFlow, event_loop::EventLoopProxy, window::WindowId},
webview::WebView,
};
pub(super) struct DesktopController {
pub(super) webviews: HashMap<WindowId, WebView>,
pub(super) sender: futures_channel::mpsc::UnboundedSender<SchedulerMsg>,
pub(super) pending_edits: Arc<RwLock<VecDeque<String>>>,
pub(super) quit_app_on_close: bool,
pub(super) is_ready: Arc<AtomicBool>,
}
impl DesktopController {
// Launch the virtualdom on its own thread managed by tokio
// returns the desktop state
pub(super) fn new_on_tokio<P: Send + 'static>(
root: Component<P>,
props: P,
proxy: EventLoopProxy<UserWindowEvent>,
) -> Self {
let edit_queue = Arc::new(RwLock::new(VecDeque::new()));
let pending_edits = edit_queue.clone();
let (sender, receiver) = futures_channel::mpsc::unbounded::<SchedulerMsg>();
let return_sender = sender.clone();
let desktop_context_proxy = proxy.clone();
std::thread::spawn(move || {
// We create the runtime as multithreaded, so you can still "spawn" onto multiple threads
let runtime = tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap();
runtime.block_on(async move {
let mut dom =
VirtualDom::new_with_props_and_scheduler(root, props, (sender, receiver));
let window_context = DesktopContext::new(desktop_context_proxy);
dom.base_scope().provide_context(window_context);
let edits = dom.rebuild();
edit_queue
.write()
.unwrap()
.push_front(serde_json::to_string(&edits.edits).unwrap());
// Make sure the window is ready for any new updates
proxy.send_event(UserWindowEvent::Update).unwrap();
loop {
dom.wait_for_work().await;
let mut muts = dom.work_with_deadline(|| false);
while let Some(edit) = muts.pop() {
edit_queue
.write()
.unwrap()
.push_front(serde_json::to_string(&edit.edits).unwrap());
}
let _ = proxy.send_event(UserWindowEvent::Update);
}
})
});
Self {
pending_edits,
sender: return_sender,
webviews: HashMap::new(),
is_ready: Arc::new(AtomicBool::new(false)),
quit_app_on_close: true,
}
}
pub(super) fn close_window(&mut self, window_id: WindowId, control_flow: &mut ControlFlow) {
self.webviews.remove(&window_id);
if self.webviews.is_empty() && self.quit_app_on_close {
*control_flow = ControlFlow::Exit;
}
}
pub(super) fn try_load_ready_webviews(&mut self) {
if self.is_ready.load(std::sync::atomic::Ordering::Relaxed) {
let mut queue = self.pending_edits.write().unwrap();
let (_id, view) = self.webviews.iter_mut().next().unwrap();
while let Some(edit) = queue.pop_back() {
view.evaluate_script(&format!("window.interpreter.handleEdits({})", edit))
.unwrap();
}
}
}
}
| 34.268519 | 101 | 0.563091 |
d931208eef4fd106fe0f5586b820d3e265bc16d4 | 1,714 | #[derive(Debug)]
enum IpAddrKind {
V4,
V6,
}
struct IpAddr {
address: String,
kind: IpAddrKind,
}
enum IpAddr2 {
V4(String),
V6(String),
}
enum IpAddr3 {
V4(u8, u8, u8, u8),
V6(String),
}
// The following enum is equivalent to the subsequent 4 structs
#[derive(Debug)]
enum Message {
Quit,
Move { x: i32, y: i32 },
Write(String),
ChangeColor(i32, i32, i32),
}
struct QuitMessage; // unit struct
struct MoveMessage {
x: i32,
y: i32,
}
struct WriteMessage(String); // tuple struct
struct ChangeColorMessage(i32, i32, i32); // tuple struct
// But we can define a fonction for all the variants of the enum at once
impl Message {
fn call(&self) {
// body
}
}
fn main() {
let _four = IpAddrKind::V4;
let _six = IpAddrKind::V6;
let home = IpAddr {
kind: IpAddrKind::V4,
address: String::from("127.0.0.1"),
};
println!("{} {:#?}", home.address, home.kind);
let _loopback = IpAddr {
kind: IpAddrKind::V6,
address: String::from("::1"),
};
let _home = IpAddr2::V4(String::from("127.0.0.1"));
let _loopback = IpAddr2::V6(String::from("::1"));
let _home = IpAddr3::V4(127, 0, 0, 1);
let _loopback = IpAddr3::V6(String::from("::1"));
let _qm = QuitMessage {};
let mm = MoveMessage { x: 1, y: 2 };
let _wm = WriteMessage(String::from("Hi"));
let _ccm = ChangeColorMessage(255, 12, 37);
println!("{} {}", mm.x, mm.y);
let m1 = Message::Quit;
let m2 = Message::Move { x: 1, y: 2 };
let m3 = Message::Write(String::from("Hi"));
let m4 = Message::ChangeColor(255, 12, 37);
m1.call();
m2.call();
m3.call();
m4.call();
}
| 20.650602 | 72 | 0.572345 |
f89e4bcd95fc84346af28799ff39d46accc42af9 | 5,807 | use lazy_static;
use clap;
use std::sync::RwLock;
use crate::paths;
use crate::fail::{HError, HResult, ErrorLog};
#[derive(Clone)]
// These are options, so we know if they have been set or not
struct ArgvConfig {
animation: Option<bool>,
show_hidden: Option<bool>,
icons: Option<bool>
}
impl ArgvConfig {
fn new() -> Self {
ArgvConfig {
animation: None,
show_hidden: None,
icons: None
}
}
}
lazy_static! {
static ref ARGV_CONFIG: RwLock<ArgvConfig> = RwLock::new(ArgvConfig::new());
}
pub fn set_argv_config(args: clap::ArgMatches) -> HResult<()> {
let animation = args.is_present("animation-off");
let show_hidden = args.is_present("show-hidden");
let icons = args.is_present("icons");
let mut config = ArgvConfig::new();
if animation == true {
config.animation = Some(false);
}
if show_hidden == true {
config.show_hidden = Some(true);
}
if icons == true {
config.icons = Some(true)
}
*ARGV_CONFIG.write()? = config;
Ok(())
}
fn get_argv_config() -> HResult<ArgvConfig> {
Ok(ARGV_CONFIG.try_read()?.clone())
}
fn infuse_argv_config(mut config: Config) -> Config {
let argv_config = get_argv_config().unwrap_or(ArgvConfig::new());
argv_config.animation.map(|val| config.animation = val);
argv_config.show_hidden.map(|val| config.show_hidden = val);
argv_config.icons.map(|val| config.icons = val);
config
}
#[derive(Debug, Clone)]
pub struct Config {
pub animation: bool,
pub animation_refresh_frequency: usize,
pub show_hidden: bool,
pub select_cmd: String,
pub cd_cmd: String,
pub icons: bool,
pub media_autoplay: bool,
pub media_mute: bool,
pub media_previewer: String,
pub ratios: Vec::<usize>
}
impl Config {
pub fn new() -> Config {
let config = Config::default();
infuse_argv_config(config)
}
pub fn default() -> Config {
Config {
animation: true,
animation_refresh_frequency: 60,
show_hidden: false,
select_cmd: "find -type f | fzf -m".to_string(),
cd_cmd: "find -type d | fzf".to_string(),
icons: false,
media_autoplay: false,
media_mute: false,
media_previewer: "hunter-media".to_string(),
ratios: vec![20,30,49]
}
}
pub fn load() -> HResult<Config> {
let config_path = paths::config_path()?;
if !config_path.exists() {
return Ok(infuse_argv_config(Config::new()));
}
let config_string = std::fs::read_to_string(config_path)?;
let config = config_string.lines().fold(Config::new(), |mut config, line| {
match Config::prep_line(line) {
Ok(("animation", "on")) => { config.animation = true; },
Ok(("animation", "off")) => { config.animation = false; },
Ok(("animation_refresh_frequency", frequency)) => {
match frequency.parse::<usize>() {
Ok(parsed_freq) => config.animation_refresh_frequency = parsed_freq,
_ => HError::config_error::<Config>(line.to_string()).log()
}
}
Ok(("show_hidden", "on")) => { config.show_hidden = true; },
Ok(("show_hidden", "off")) => { config.show_hidden = false; },
Ok(("icons", "on")) => config.icons = true,
Ok(("icons", "off")) => config.icons = false,
Ok(("select_cmd", cmd)) => {
let cmd = cmd.to_string();
config.select_cmd = cmd;
}
Ok(("cd_cmd", cmd)) => {
let cmd = cmd.to_string();
config.cd_cmd = cmd;
}
Ok(("media_autoplay", "on")) => { config.media_autoplay = true; },
Ok(("media_autoplay", "off")) => { config.media_autoplay = false; },
Ok(("media_mute", "on")) => { config.media_mute = true; },
Ok(("media_mute", "off")) => { config.media_mute = false; },
Ok(("media_previewer", cmd)) => {
let cmd = cmd.to_string();
config.media_previewer = cmd;
},
Ok(("ratios", ratios)) => {
let ratios_str = ratios.to_string();
if ratios_str.chars().all(|x| x.is_digit(10) || x.is_whitespace()
|| x == ':' || x == ',' ) {
let ratios: Vec<usize> = ratios_str.split([',', ':'].as_ref())
.map(|r| r.trim().parse::<usize>().unwrap()).collect();
let ratios_sum: usize = ratios.iter().sum();
if ratios.len() == 3 && ratios_sum > 0 && ratios.iter()
.filter(|&r| *r > u16::max_value() as usize).next() == None {
config.ratios = ratios;
}
}
}
_ => { HError::config_error::<Config>(line.to_string()).log(); }
}
config
});
let config = infuse_argv_config(config);
Ok(config)
}
fn prep_line<'a>(line: &'a str) -> HResult<(&'a str, &'a str)> {
let setting = line.split("=").collect::<Vec<&str>>();
if setting.len() == 2 {
Ok((setting[0], setting[1]))
} else {
HError::config_error(line.to_string())
}
}
pub fn animate(&self) -> bool {
self.animation
}
pub fn show_hidden(&self) -> bool {
self.show_hidden
}
}
| 30.888298 | 93 | 0.505597 |
9b927b83381d5f102b95f80a3c64dfdc4f50cf22 | 10,050 | use super::{StakePoolTemplate, WalletTemplate};
use crate::certificate::VoteAction;
use crate::ledger::governance::{ParametersGovernanceAction, TreasuryGovernanceAction};
use crate::testing::scenario::template::ExternalProposalId;
use crate::testing::scenario::template::ProposalDef;
use crate::testing::scenario::template::VotePlanDef;
use crate::{
date::BlockDate,
rewards::{Ratio, TaxType},
testing::data::Wallet,
testing::scenario::{scenario_builder::ScenarioBuilderError, template::StakePoolDef},
value::Value,
};
use std::{
collections::{HashMap, HashSet},
num::NonZeroU64,
};
#[derive(Clone, Debug)]
pub struct WalletTemplateBuilder {
alias: String,
delagate_alias: Option<String>,
ownership_alias: Option<String>,
initial_value: Option<Value>,
committee_member: bool,
}
impl WalletTemplateBuilder {
pub fn new(alias: &str) -> Self {
WalletTemplateBuilder {
alias: alias.to_owned(),
delagate_alias: None,
ownership_alias: None,
initial_value: None,
committee_member: false,
}
}
pub fn with(&mut self, value: u64) -> &mut Self {
self.initial_value = Some(Value(value));
self
}
pub fn owns(&mut self, ownership_alias: &str) -> &mut Self {
self.ownership_alias = Some(ownership_alias.to_owned());
self
}
pub fn delegates_to(&mut self, delegates_to_alias: &str) -> &mut Self {
self.delagate_alias = Some(delegates_to_alias.to_owned());
self
}
pub fn committee_member(&mut self) -> &mut Self {
self.committee_member = true;
self
}
pub fn owns_and_delegates_to(&mut self, ownership_alias: &str) -> &mut Self {
self.owns(ownership_alias).delegates_to(ownership_alias);
self
}
pub fn build(&self) -> Result<WalletTemplate, ScenarioBuilderError> {
let value = self
.initial_value
.ok_or(ScenarioBuilderError::UndefinedValueForWallet {
alias: self.alias.clone(),
})?;
Ok(WalletTemplate {
alias: self.alias.clone(),
stake_pool_delegate_alias: self.delagate_alias.clone(),
stake_pool_owner_alias: self.ownership_alias.clone(),
initial_value: value,
committee_member: self.committee_member,
})
}
}
pub struct StakePoolTemplateBuilder {
ownership_map: HashMap<String, HashSet<WalletTemplate>>,
delegation_map: HashMap<String, HashSet<WalletTemplate>>,
}
impl StakePoolTemplateBuilder {
pub fn new(initials: &[WalletTemplate]) -> Self {
StakePoolTemplateBuilder {
ownership_map: Self::build_ownersip_map(initials),
delegation_map: Self::build_delegation_map(initials),
}
}
pub fn build_stake_pool_templates(
&self,
wallets: Vec<Wallet>,
) -> Result<Vec<StakePoolTemplate>, ScenarioBuilderError> {
self.defined_stake_pools_aliases()
.iter()
.map(|stake_pool_alias| {
let owners = self.ownership_map.get(stake_pool_alias).ok_or(
ScenarioBuilderError::NoOwnersForStakePool {
alias: stake_pool_alias.to_string(),
},
)?;
let owners_public_keys = wallets
.iter()
.filter(|w| owners.iter().any(|u| u.alias() == w.alias()))
.map(|w| w.public_key())
.collect();
Ok(StakePoolTemplate {
alias: stake_pool_alias.to_string(),
owners: owners_public_keys,
})
})
.collect()
}
pub fn defined_stake_pools_aliases(&self) -> HashSet<String> {
self.ownership_map
.clone()
.into_iter()
.chain(self.delegation_map.clone())
.map(|(k, _)| k)
.collect()
}
fn build_ownersip_map(initials: &[WalletTemplate]) -> HashMap<String, HashSet<WalletTemplate>> {
let mut output: HashMap<String, HashSet<WalletTemplate>> = HashMap::new();
for wallet_template in initials.iter().filter(|w| w.owns_stake_pool().is_some()) {
let delegate_alias = wallet_template.owns_stake_pool().unwrap();
output
.entry(delegate_alias)
.or_default()
.insert(wallet_template.clone());
}
output
}
fn build_delegation_map(
initials: &[WalletTemplate],
) -> HashMap<String, HashSet<WalletTemplate>> {
let mut output: HashMap<String, HashSet<WalletTemplate>> = HashMap::new();
for wallet_template in initials
.iter()
.filter(|w| w.delegates_stake_pool().is_some())
{
let stake_pool_alias = wallet_template.delegates_stake_pool().unwrap();
output
.entry(stake_pool_alias)
.or_default()
.insert(wallet_template.clone());
}
output
}
}
#[derive(Clone, Debug)]
pub struct StakePoolDefBuilder {
alias: String,
permissions_threshold: u8,
reward_account: bool,
tax_type: Option<TaxType>,
}
impl StakePoolDefBuilder {
pub fn new(alias: &str) -> Self {
StakePoolDefBuilder {
alias: alias.to_owned(),
permissions_threshold: 1u8,
reward_account: false,
tax_type: None,
}
}
pub fn with_permissions_threshold(&mut self, threshold: u8) -> &mut Self {
self.permissions_threshold = threshold;
self
}
pub fn with_reward_account(&mut self, reward_account: bool) -> &mut Self {
self.reward_account = reward_account;
self
}
pub fn tax_ratio(&mut self, numerator: u64, denominator: u64) -> &mut Self {
self.tax_type = Some(TaxType {
fixed: Value(0),
ratio: Ratio {
numerator,
denominator: NonZeroU64::new(denominator).unwrap(),
},
max_limit: None,
});
self
}
pub fn tax_limit(&mut self, limit: u64) -> &mut Self {
match self.tax_type.as_mut() {
Some(tax_type) => tax_type.max_limit = Some(NonZeroU64::new(limit).unwrap()),
None => unreachable!("setting tax limit for none TaxType"),
};
self
}
pub fn fixed_tax(&mut self, value: u64) -> &mut Self {
self.tax_type = Some(TaxType {
fixed: Value(value),
ratio: Ratio::zero(),
max_limit: None,
});
self
}
pub fn no_tax(&mut self) -> &mut Self {
self.tax_type = Some(TaxType::zero());
self
}
pub fn build(&self) -> StakePoolDef {
StakePoolDef {
alias: self.alias.clone(),
permissions_threshold: Some(self.permissions_threshold),
has_reward_account: self.reward_account,
tax_type: self.tax_type,
}
}
}
#[derive(Clone, Debug)]
pub struct VotePlanDefBuilder {
alias: String,
owner_alias: Option<String>,
vote_date: Option<BlockDate>,
tally_date: Option<BlockDate>,
end_tally_date: Option<BlockDate>,
proposals: Vec<ProposalDef>,
}
impl VotePlanDefBuilder {
pub fn new(alias: &str) -> Self {
VotePlanDefBuilder {
alias: alias.to_owned(),
owner_alias: Option::None,
vote_date: Option::None,
tally_date: Option::None,
end_tally_date: Option::None,
proposals: Vec::new(),
}
}
pub fn owner(&mut self, owner_alias: &str) -> &mut Self {
self.owner_alias = Some(owner_alias.to_string());
self
}
pub fn consecutive_epoch_dates(&mut self) -> &mut Self {
self.vote_date = Some(BlockDate {
epoch: 0,
slot_id: 0,
});
self.tally_date = Some(BlockDate {
epoch: 1,
slot_id: 0,
});
self.end_tally_date = Some(BlockDate {
epoch: 2,
slot_id: 0,
});
self
}
pub fn with_proposal(&mut self, proposal_builder: &mut ProposalDefBuilder) -> &mut Self {
self.proposals.push(proposal_builder.clone().build());
self
}
pub fn build(self) -> VotePlanDef {
VotePlanDef {
alias: self.alias.clone(),
owner_alias: self.owner_alias.unwrap(),
vote_date: self.vote_date.unwrap(),
tally_date: self.tally_date.unwrap(),
end_tally_date: self.end_tally_date.unwrap(),
proposals: self.proposals,
}
}
}
#[derive(Clone, Debug)]
pub struct ProposalDefBuilder {
id: ExternalProposalId,
options: u8,
action_type: VoteAction,
}
impl ProposalDefBuilder {
pub fn new(id: ExternalProposalId) -> Self {
ProposalDefBuilder {
id,
options: 3,
action_type: VoteAction::OffChain,
}
}
pub fn options(&mut self, options: u8) -> &mut Self {
self.options = options;
self
}
pub fn action_off_chain(&mut self) -> &mut Self {
self.action_type = VoteAction::OffChain;
self
}
pub fn action_rewards_add(&mut self, value: u64) -> &mut Self {
self.action_type = VoteAction::Treasury {
action: TreasuryGovernanceAction::TransferToRewards {
value: Value(value),
},
};
self
}
pub fn action_trasfer_to_rewards(&mut self, value: u64) -> &mut Self {
self.action_type = VoteAction::Parameters {
action: ParametersGovernanceAction::RewardAdd {
value: Value(value),
},
};
self
}
pub fn build(self) -> ProposalDef {
ProposalDef {
id: self.id,
options: self.options,
action_type: self.action_type,
}
}
}
| 29.046243 | 100 | 0.574428 |
21ca7534353be772b41a0f43586e0fbfde24f1a4 | 26,323 | //! High-level types and functions related to CSS parsing
use std::{
num::ParseIntError,
fmt,
};
pub use simplecss::Error as CssSyntaxError;
use simplecss::Tokenizer;
use css_parser;
pub use css_parser::CssParsingError;
use azul_css::{
Css,
CssDeclaration,
DynamicCssProperty,
DynamicCssPropertyDefault,
CssPropertyType,
CssRuleBlock,
CssPath,
CssPathSelector,
CssPathPseudoSelector,
NodeTypePath,
NodeTypePathParseError,
};
/// Error that can happen during the parsing of a CSS value,
/// together with the position in the source at which it occurred.
#[derive(Debug, Clone, PartialEq)]
pub struct CssParseError<'a> {
    /// The underlying parse failure.
    pub error: CssParseErrorInner<'a>,
    /// Line/column in the input CSS where the failure was detected.
    pub location: ErrorLocation,
}
/// Parse failure variants, without location information
/// (the location is attached by [`CssParseError`]).
#[derive(Debug, Clone, PartialEq)]
pub enum CssParseErrorInner<'a> {
    /// A hard error in the CSS syntax
    ParseError(CssSyntaxError),
    /// Braces are not balanced properly
    UnclosedBlock,
    /// Invalid syntax, such as `#div { #div: "my-value" }`
    MalformedCss,
    /// Error parsing dynamic CSS property, such as
    /// `#div { width: {{ my_id }} /* no default case */ }`
    DynamicCssParseError(DynamicCssParseError<'a>),
    /// Error while parsing a pseudo selector (like `:aldkfja`)
    PseudoSelectorParseError(CssPseudoSelectorParseError<'a>),
    /// The path has to be either `*`, `div`, `p` or something like that
    NodeTypePath(NodeTypePathParseError<'a>),
    /// A certain property has an unknown key, for example: `alsdfkj: 500px` = `unknown CSS key "alsdfkj: 500px"`
    UnknownPropertyKey(&'a str, &'a str),
}
impl_display!{ CssParseErrorInner<'a>, {
ParseError(e) => format!("Parse Error: {:?}", e),
UnclosedBlock => "Unclosed block",
MalformedCss => "Malformed Css",
DynamicCssParseError(e) => format!("Error parsing dynamic CSS property: {}", e),
PseudoSelectorParseError(e) => format!("Failed to parse pseudo-selector: {}", e),
NodeTypePath(e) => format!("Failed to parse CSS selector path: {}", e),
UnknownPropertyKey(k, v) => format!("Unknown CSS key: \"{}: {}\"", k, v),
}}
impl_from! { DynamicCssParseError<'a>, CssParseErrorInner::DynamicCssParseError }
impl_from! { CssPseudoSelectorParseError<'a>, CssParseErrorInner::PseudoSelectorParseError }
impl_from! { NodeTypePathParseError<'a>, CssParseErrorInner::NodeTypePath }
/// Errors that can occur while parsing a pseudo-selector such as
/// `:hover` or `:nth-child(2)`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum CssPseudoSelectorParseError<'a> {
    /// The pseudo-selector name is not recognized.
    UnknownSelector(&'a str),
    /// The argument of `:nth-child(..)` is not a valid unsigned integer.
    InvalidNthChild(ParseIntError),
    /// `:nth-child` is missing its opening or closing brace.
    UnclosedBracesNthChild(&'a str),
}

// Enables `?` on `str::parse` inside the pseudo-selector parser.
impl<'a> From<ParseIntError> for CssPseudoSelectorParseError<'a> {
    fn from(e: ParseIntError) -> Self { CssPseudoSelectorParseError::InvalidNthChild(e) }
}
impl_display! { CssPseudoSelectorParseError<'a>, {
UnknownSelector(e) => format!("Invalid CSS pseudo-selector: ':{}'", e),
InvalidNthChild(e) => format!("Invalid :nth-child pseudo-selector: ':{}'", e),
UnclosedBracesNthChild(e) => format!(":nth-child has unclosed braces: ':{}'", e),
}}
/// Parses a single pseudo-selector name (without the leading `:`),
/// e.g. `"hover"` or `"nth-child(4)"`.
fn pseudo_selector_from_str<'a>(data: &'a str) -> Result<CssPathPseudoSelector, CssPseudoSelectorParseError<'a>> {
    match data {
        "first" => Ok(CssPathPseudoSelector::First),
        "last" => Ok(CssPathPseudoSelector::Last),
        "hover" => Ok(CssPathPseudoSelector::Hover),
        "active" => Ok(CssPathPseudoSelector::Active),
        "focus" => Ok(CssPathPseudoSelector::Focus),
        other => parse_nth_child(other),
    }
}

/// Parses `nth-child(<number>)`; any other input is an `UnknownSelector` error.
fn parse_nth_child<'a>(other: &'a str) -> Result<CssPathPseudoSelector, CssPseudoSelectorParseError<'a>> {
    if !other.starts_with("nth-child") {
        return Err(CssPseudoSelectorParseError::UnknownSelector(other));
    }
    // Everything after the "nth-child" prefix, e.g. "(4)".
    //
    // BUGFIX: the original called `trim()` here (and on the inner slice below)
    // but discarded the returned value - `str::trim` does not mutate in place -
    // so inputs like "nth-child (4)" or "nth-child( 4 )" were rejected even
    // though the trims show they were meant to be accepted.
    let braces = other["nth-child".len()..].trim();
    if !braces.starts_with("(") || !braces.ends_with(")") {
        return Err(CssPseudoSelectorParseError::UnclosedBracesNthChild(other));
    }
    // Safe slice: `starts_with` / `ends_with` can only both succeed on a
    // non-empty string, so `braces.len() >= 1`.
    let number = braces[1..braces.len() - 1].trim();
    let parsed = number.parse::<usize>()?;
    Ok(CssPathPseudoSelector::NthChild(parsed))
}
#[test]
fn test_css_pseudo_selector_parse() {
let ok_res = [
("first", CssPathPseudoSelector::First),
("last", CssPathPseudoSelector::Last),
("nth-child(4)", CssPathPseudoSelector::NthChild(4)),
("hover", CssPathPseudoSelector::Hover),
("active", CssPathPseudoSelector::Active),
("focus", CssPathPseudoSelector::Focus),
];
let err = [
("asdf", CssPseudoSelectorParseError::UnknownSelector("asdf")),
("", CssPseudoSelectorParseError::UnknownSelector("")),
("nth-child(", CssPseudoSelectorParseError::UnclosedBracesNthChild("nth-child(")),
("nth-child)", CssPseudoSelectorParseError::UnclosedBracesNthChild("nth-child)")),
// Can't test for ParseIntError because the fields are private.
// This is an example on why you shouldn't use std::error::Error!
];
for (s, a) in &ok_res {
assert_eq!(pseudo_selector_from_str(s), Ok(*a));
}
for (s, e) in &err {
assert_eq!(pseudo_selector_from_str(s), Err(e.clone()));
}
}
/// Position inside the CSS source at which a parse error occurred.
/// The line number is 1-indexed (it is computed via `lines().count()`
/// over the text preceding the error).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct ErrorLocation {
    /// Line number in the source text.
    pub line: usize,
    /// Character offset within that line.
    pub column: usize,
}

impl<'a> fmt::Display for CssParseError<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "CSS error at line {}:{}: {}", self.location.line, self.location.column, self.error)
    }
}
/// Parses a CSS string into a stylesheet, converting any parse failure into a
/// `CssParseError` that carries an approximate line/column of the failure.
pub fn new_from_str<'a>(css_string: &'a str) -> Result<Css, CssParseError<'a>> {
    let mut tokenizer = Tokenizer::new(css_string);
    match new_from_str_inner(css_string, &mut tokenizer) {
        Ok(css) => Ok(css),
        Err(e) => {
            // Byte offset of the token that triggered the error.
            let error_location = tokenizer.pos().saturating_sub(1);
            let line_number: usize = css_string[0..error_location].lines().count();
            // Rust doesn't count "\n" as a character, so we have to add the line number count on top
            let total_characters: usize = css_string[0..error_location].lines().take(line_number.saturating_sub(1)).map(|line| line.chars().count()).sum();
            let total_characters = total_characters + line_number;
            /*println!("line_number: {} error location: {}, total characters: {}", line_number,
                error_location, total_characters);*/
            // NOTE(review): the `+ 2` below looks empirically tuned; the resulting
            // column is approximate (especially for multi-byte input) - confirm
            // before relying on it for precise error reporting.
            let characters_in_line = (error_location + 2) - total_characters;
            let error_location = ErrorLocation {
                line: line_number,
                column: characters_in_line,
            };
            Err(CssParseError {
                error: e,
                location: error_location,
            })
        }
    }
}
/// Parses a CSS string (single-threaded) and returns the parsed rules in blocks.
///
/// `tokenizer` is passed in by the caller so that, on failure, the caller can
/// still query `tokenizer.pos()` to compute an error location.
fn new_from_str_inner<'a>(css_string: &'a str, tokenizer: &mut Tokenizer<'a>) -> Result<Css, CssParseErrorInner<'a>> {
    use simplecss::{Token, Combinator};
    let mut css_blocks = Vec::new();
    // Used for error checking / checking for closed braces
    let mut parser_in_block = false;
    let mut block_nesting = 0_usize;
    // Current css paths (i.e. `div#id, .class, p` are stored here -
    // when the block is finished, all `current_rules` gets duplicated with
    // one path corresponding to one set of rules each).
    let mut current_paths = Vec::new();
    // Current CSS declarations
    let mut current_rules = Vec::new();
    // Keep track of the current path during parsing
    let mut last_path = Vec::new();
    let css_property_map = azul_css::get_css_key_map();
    loop {
        let tokenize_result = tokenizer.parse_next();
        match tokenize_result {
            Ok(token) => {
                match token {
                    Token::BlockStart => {
                        if parser_in_block {
                            // multi-nested CSS blocks are currently not supported
                            return Err(CssParseErrorInner::MalformedCss);
                        }
                        parser_in_block = true;
                        block_nesting += 1;
                        current_paths.push(last_path.clone());
                        last_path.clear();
                    },
                    Token::Comma => {
                        current_paths.push(last_path.clone());
                        last_path.clear();
                    },
                    Token::BlockEnd => {
                        // BUGFIX: validate that we are inside a block *before*
                        // decrementing `block_nesting`. The original decremented
                        // first, which underflows the `usize` (panicking in debug
                        // builds) on input that starts with a stray `}`.
                        if !parser_in_block {
                            return Err(CssParseErrorInner::MalformedCss);
                        }
                        block_nesting -= 1;
                        parser_in_block = false;
                        // Duplicate the collected declarations once per selector path.
                        for path in current_paths.drain(..) {
                            css_blocks.push(CssRuleBlock {
                                path: CssPath { selectors: path },
                                declarations: current_rules.clone(),
                            })
                        }
                        current_rules.clear();
                        last_path.clear(); // technically unnecessary, but just to be sure
                    },
                    // tokens that adjust the last_path
                    Token::UniversalSelector => {
                        if parser_in_block {
                            return Err(CssParseErrorInner::MalformedCss);
                        }
                        last_path.push(CssPathSelector::Global);
                    },
                    Token::TypeSelector(div_type) => {
                        if parser_in_block {
                            return Err(CssParseErrorInner::MalformedCss);
                        }
                        last_path.push(CssPathSelector::Type(NodeTypePath::from_str(div_type)?));
                    },
                    Token::IdSelector(id) => {
                        if parser_in_block {
                            return Err(CssParseErrorInner::MalformedCss);
                        }
                        last_path.push(CssPathSelector::Id(id.to_string()));
                    },
                    Token::ClassSelector(class) => {
                        if parser_in_block {
                            return Err(CssParseErrorInner::MalformedCss);
                        }
                        last_path.push(CssPathSelector::Class(class.to_string()));
                    },
                    Token::Combinator(Combinator::GreaterThan) => {
                        if parser_in_block {
                            return Err(CssParseErrorInner::MalformedCss);
                        }
                        last_path.push(CssPathSelector::DirectChildren);
                    },
                    Token::Combinator(Combinator::Space) => {
                        if parser_in_block {
                            return Err(CssParseErrorInner::MalformedCss);
                        }
                        last_path.push(CssPathSelector::Children);
                    },
                    Token::PseudoClass(pseudo_class) => {
                        if parser_in_block {
                            return Err(CssParseErrorInner::MalformedCss);
                        }
                        last_path.push(CssPathSelector::PseudoSelector(pseudo_selector_from_str(pseudo_class)?));
                    },
                    Token::Declaration(key, val) => {
                        if !parser_in_block {
                            return Err(CssParseErrorInner::MalformedCss);
                        }
                        let parsed_key = CssPropertyType::from_str(key, &css_property_map)
                            .ok_or(CssParseErrorInner::UnknownPropertyKey(key, val))?;
                        current_rules.push(determine_static_or_dynamic_css_property(parsed_key, val)?);
                    },
                    Token::EndOfStream => {
                        break;
                    },
                    _ => {
                        // attributes, lang-attributes and @keyframes are not supported
                    }
                }
            },
            Err(e) => {
                return Err(CssParseErrorInner::ParseError(e));
            }
        }
    }
    // non-even number of blocks
    if block_nesting != 0 {
        return Err(CssParseErrorInner::UnclosedBlock);
    }
    Ok(css_blocks.into())
}
/// Error that can happen during `css_parser::parse_key_value_pair`
#[derive(Debug, Clone, PartialEq)]
pub enum DynamicCssParseError<'a> {
/// The braces of a dynamic CSS property aren't closed or unbalanced, i.e. ` [[ `
UnclosedBraces,
/// There is a valid dynamic css property, but no default case
NoDefaultCase,
/// The dynamic CSS property has no ID, i.e. `[[ 400px ]]`
NoId,
/// The ID may not start with a number or be a CSS property itself
InvalidId,
/// Dynamic css property braces are empty, i.e. `[[ ]]`
EmptyBraces,
/// Unexpected value when parsing the string
UnexpectedValue(CssParsingError<'a>),
}
impl_display!{ DynamicCssParseError<'a>, {
UnclosedBraces => "The braces of a dynamic CSS property aren't closed or unbalanced, i.e. ` [[ `",
NoDefaultCase => "There is a valid dynamic css property, but no default case",
NoId => "The dynamic CSS property has no ID, i.e. [[ 400px ]]",
InvalidId => "The ID may not start with a number or be a CSS property itself",
EmptyBraces => "Dynamic css property braces are empty, i.e. `[[ ]]`",
UnexpectedValue(e) => format!("Unexpected value: {}", e),
}}
// Enables `?` to convert low-level value-parsing errors into
// dynamic-property errors.
impl<'a> From<CssParsingError<'a>> for DynamicCssParseError<'a> {
    fn from(e: CssParsingError<'a>) -> Self {
        DynamicCssParseError::UnexpectedValue(e)
    }
}

/// Opening delimiter of a dynamic CSS property, e.g. `[[ id | 500px ]]`.
pub const START_BRACE: &str = "[[";
/// Closing delimiter of a dynamic CSS property.
pub const END_BRACE: &str = "]]";
/// Determine if a Css property is static (immutable) or if it can change
/// during the runtime of the program.
///
/// A value wrapped in `[[` / `]]` braces is treated as a dynamic property;
/// anything else is parsed as a regular static property.
pub fn determine_static_or_dynamic_css_property<'a>(key: CssPropertyType, value: &'a str)
-> Result<CssDeclaration, DynamicCssParseError<'a>>
{
    let value = value.trim();
    let is_starting_with_braces = value.starts_with(START_BRACE);
    let is_ending_with_braces = value.ends_with(END_BRACE);
    match (is_starting_with_braces, is_ending_with_braces) {
        // Only one side of the braces present, e.g. `[[ 400px`.
        (true, false) | (false, true) => {
            Err(DynamicCssParseError::UnclosedBraces)
        },
        // `[[ id | default ]]` - dynamic property.
        // (idiom: `map` instead of the original `and_then(|val| Ok(..))`)
        (true, true) => {
            parse_dynamic_css_property(key, value).map(CssDeclaration::Dynamic)
        },
        // Plain static property.
        (false, false) => {
            Ok(CssDeclaration::Static(css_parser::parse_key_value_pair(key, value)?))
        }
    }
}
/// Parses the interior of a dynamic CSS property, e.g. `"[[ my_id | 400px ]]"`.
///
/// The expected format is `[[ <id> | <default> ]]`, where `<default>` is either
/// a parsable CSS value for `key` or the keyword `auto`.
pub fn parse_dynamic_css_property<'a>(key: CssPropertyType, value: &'a str) -> Result<DynamicCssProperty, DynamicCssParseError<'a>> {
    use std::char;
    // "[[ id | 400px ]]" => "id | 400px"
    // NOTE(review): `trim_left_matches` / `trim_right_matches` are the
    // pre-Rust-1.33 names of `trim_start_matches` / `trim_end_matches`.
    let value = value.trim_left_matches(START_BRACE);
    let value = value.trim_right_matches(END_BRACE);
    let value = value.trim();
    let mut pipe_split = value.splitn(2, "|");
    let dynamic_id = pipe_split.next();
    let default_case = pipe_split.next();
    // note: dynamic_id will always be Some(), which is why the
    // `(_, None)` arms below can only be reached through a broken iterator.
    let (default_case, dynamic_id) = match (default_case, dynamic_id) {
        // `[[ id | default ]]` - both halves present.
        (Some(default), Some(id)) => (default, id),
        // No `|` separator: decide which half is missing for a precise error.
        (None, Some(id)) => {
            if id.trim().is_empty() {
                return Err(DynamicCssParseError::EmptyBraces);
            } else if css_parser::parse_key_value_pair(key, id).is_ok() {
                // if there is an ID, but the ID is a CSS value
                return Err(DynamicCssParseError::NoId);
            } else {
                return Err(DynamicCssParseError::NoDefaultCase);
            }
        },
        (None, None) | (Some(_), None) => unreachable!(), // iterator would be broken if this happened
    };
    let dynamic_id = dynamic_id.trim();
    let default_case = default_case.trim();
    // Reject empty halves with the most specific error possible.
    match (dynamic_id.is_empty(), default_case.is_empty()) {
        (true, true) => return Err(DynamicCssParseError::EmptyBraces),
        (true, false) => return Err(DynamicCssParseError::NoId),
        (false, true) => return Err(DynamicCssParseError::NoDefaultCase),
        (false, false) => { /* everything OK */ }
    }
    // An ID may not start with a digit and may not itself be a valid CSS value.
    if dynamic_id.starts_with(char::is_numeric) ||
       css_parser::parse_key_value_pair(key, dynamic_id).is_ok() {
        return Err(DynamicCssParseError::InvalidId);
    }
    let default_case_parsed = match default_case {
        "auto" => DynamicCssPropertyDefault::Auto,
        other => DynamicCssPropertyDefault::Exact(css_parser::parse_key_value_pair(key, other)?),
    };
    Ok(DynamicCssProperty {
        property_type: key,
        dynamic_id: dynamic_id.to_string(),
        default: default_case_parsed,
    })
}
#[test]
fn test_detect_static_or_dynamic_property() {
use azul_css::{CssProperty, StyleTextAlignmentHorz};
use css_parser::InvalidValueErr;
assert_eq!(
determine_static_or_dynamic_css_property(CssPropertyType::TextAlign, " center "),
Ok(CssDeclaration::Static(CssProperty::TextAlign(StyleTextAlignmentHorz::Center)))
);
assert_eq!(
determine_static_or_dynamic_css_property(CssPropertyType::TextAlign, "[[ 400px ]]"),
Err(DynamicCssParseError::NoDefaultCase)
);
assert_eq!(determine_static_or_dynamic_css_property(CssPropertyType::TextAlign, "[[ 400px"),
Err(DynamicCssParseError::UnclosedBraces)
);
assert_eq!(
determine_static_or_dynamic_css_property(CssPropertyType::TextAlign, "[[ 400px | center ]]"),
Err(DynamicCssParseError::InvalidId)
);
assert_eq!(
determine_static_or_dynamic_css_property(CssPropertyType::TextAlign, "[[ hello | center ]]"),
Ok(CssDeclaration::Dynamic(DynamicCssProperty {
property_type: CssPropertyType::TextAlign,
default: DynamicCssPropertyDefault::Exact(CssProperty::TextAlign(StyleTextAlignmentHorz::Center)),
dynamic_id: String::from("hello"),
}))
);
assert_eq!(
determine_static_or_dynamic_css_property(CssPropertyType::TextAlign, "[[ hello | auto ]]"),
Ok(CssDeclaration::Dynamic(DynamicCssProperty {
property_type: CssPropertyType::TextAlign,
default: DynamicCssPropertyDefault::Auto,
dynamic_id: String::from("hello"),
}))
);
assert_eq!(
determine_static_or_dynamic_css_property(CssPropertyType::TextAlign, "[[ abc | hello ]]"),
Err(DynamicCssParseError::UnexpectedValue(
CssParsingError::InvalidValueErr(InvalidValueErr("hello"))
))
);
assert_eq!(
determine_static_or_dynamic_css_property(CssPropertyType::TextAlign, "[[ ]]"),
Err(DynamicCssParseError::EmptyBraces)
);
assert_eq!(
determine_static_or_dynamic_css_property(CssPropertyType::TextAlign, "[[]]"),
Err(DynamicCssParseError::EmptyBraces)
);
assert_eq!(
determine_static_or_dynamic_css_property(CssPropertyType::TextAlign, "[[ center ]]"),
Err(DynamicCssParseError::NoId)
);
assert_eq!(
determine_static_or_dynamic_css_property(CssPropertyType::TextAlign, "[[ hello | ]]"),
Err(DynamicCssParseError::NoDefaultCase)
);
// debatable if this is a suitable error for this case:
assert_eq!(
determine_static_or_dynamic_css_property(CssPropertyType::TextAlign, "[[ | ]]"),
Err(DynamicCssParseError::EmptyBraces)
);
}
#[test]
fn test_css_parse_1() {
use azul_css::{ColorU, StyleBackgroundColor, NodeTypePath, CssProperty};
let parsed_css = new_from_str("
div#my_id .my_class:first {
background-color: red;
}
").unwrap();
let expected_css_rules = vec![
CssRuleBlock {
path: CssPath {
selectors: vec![
CssPathSelector::Type(NodeTypePath::Div),
CssPathSelector::Id(String::from("my_id")),
CssPathSelector::Children,
// NOTE: This is technically wrong, the space between "#my_id"
// and ".my_class" is important, but gets ignored for now
CssPathSelector::Class(String::from("my_class")),
CssPathSelector::PseudoSelector(CssPathPseudoSelector::First),
],
},
declarations: vec![CssDeclaration::Static(CssProperty::BackgroundColor(StyleBackgroundColor(ColorU { r: 255, g: 0, b: 0, a: 255 })))],
}
];
assert_eq!(parsed_css, expected_css_rules.into());
}
#[test]
fn test_css_simple_selector_parse() {
use self::CssPathSelector::*;
use azul_css::NodeTypePath;
let css = "div#id.my_class > p .new { }";
let parsed = vec![
Type(NodeTypePath::Div),
Id("id".into()),
Class("my_class".into()),
DirectChildren,
Type(NodeTypePath::P),
Children,
Class("new".into())
];
assert_eq!(new_from_str(css).unwrap(), Css {
rules: vec![CssRuleBlock {
path: CssPath { selectors: parsed },
declarations: Vec::new(),
}],
});
}
#[cfg(test)]
mod stylesheet_parse {
use azul_css::*;
use super::*;
fn test_css(css: &str, expected: Vec<CssRuleBlock>) {
let css = new_from_str(css).unwrap();
assert_eq!(css, expected.into());
}
// Tests that an element with a single class always gets the CSS element applied properly
#[test]
fn test_apply_css_pure_class() {
let red = CssProperty::BackgroundColor(StyleBackgroundColor(ColorU { r: 255, g: 0, b: 0, a: 255 }));
let blue = CssProperty::BackgroundColor(StyleBackgroundColor(ColorU { r: 0, g: 0, b: 255, a: 255 }));
let black = CssProperty::BackgroundColor(StyleBackgroundColor(ColorU { r: 0, g: 0, b: 0, a: 255 }));
// Simple example
{
let css_1 = ".my_class { background-color: red; }";
let expected_rules = vec![
CssRuleBlock {
path: CssPath { selectors: vec![CssPathSelector::Class("my_class".into())] },
declarations: vec![
CssDeclaration::Static(red.clone())
],
},
];
test_css(css_1, expected_rules);
}
// Slightly more complex example
{
let css_2 = "#my_id { background-color: red; } .my_class { background-color: blue; }";
let expected_rules = vec![
CssRuleBlock {
path: CssPath { selectors: vec![CssPathSelector::Id("my_id".into())] },
declarations: vec![CssDeclaration::Static(red.clone())]
},
CssRuleBlock {
path: CssPath { selectors: vec![CssPathSelector::Class("my_class".into())] },
declarations: vec![CssDeclaration::Static(blue.clone())]
},
];
test_css(css_2, expected_rules);
}
// Even more complex example
{
let css_3 = "* { background-color: black; } .my_class#my_id { background-color: red; } .my_class { background-color: blue; }";
let expected_rules = vec![
CssRuleBlock {
path: CssPath { selectors: vec![CssPathSelector::Global] },
declarations: vec![CssDeclaration::Static(black.clone())]
},
CssRuleBlock {
path: CssPath { selectors: vec![CssPathSelector::Class("my_class".into()), CssPathSelector::Id("my_id".into())] },
declarations: vec![CssDeclaration::Static(red.clone())]
},
CssRuleBlock {
path: CssPath { selectors: vec![CssPathSelector::Class("my_class".into())] },
declarations: vec![CssDeclaration::Static(blue.clone())]
},
];
test_css(css_3, expected_rules);
}
}
}
// Assert that order of the style rules is correct (in same order as provided in CSS form)
#[test]
fn test_multiple_rules() {
use azul_css::*;
use self::CssPathSelector::*;
let parsed_css = new_from_str("
* { }
* div.my_class#my_id { }
* div#my_id { }
* #my_id { }
div.my_class.specific#my_id { }
").unwrap();
let expected_rules = vec![
// Rules are sorted by order of appearance in source string
CssRuleBlock { path: CssPath { selectors: vec![Global] }, declarations: Vec::new() },
CssRuleBlock { path: CssPath { selectors: vec![Global, Type(NodeTypePath::Div), Class("my_class".into()), Id("my_id".into())] }, declarations: Vec::new() },
CssRuleBlock { path: CssPath { selectors: vec![Global, Type(NodeTypePath::Div), Id("my_id".into())] }, declarations: Vec::new() },
CssRuleBlock { path: CssPath { selectors: vec![Global, Id("my_id".into())] }, declarations: Vec::new() },
CssRuleBlock { path: CssPath { selectors: vec![Type(NodeTypePath::Div), Class("my_class".into()), Class("specific".into()), Id("my_id".into())] }, declarations: Vec::new() },
];
assert_eq!(parsed_css, expected_rules.into());
}
| 40.621914 | 183 | 0.569654 |
e65f2c8a4f4800063a8a13218515ee5ad0b0e069 | 7,435 | // Take a look at the license at the top of the repository in the LICENSE file.
//! # Cairo bindings
//!
//! This library contains safe Rust bindings for [Cairo](https://www.cairographics.org/).
//! It is a part of [gtk-rs](https://gtk-rs.org/).
//!
//! Cairo 1.14 is the lowest supported version for the underlying library.
//!
//! # Crate features
//!
//! ## Default-on features
//!
//! * **use_glib** - Use with [glib](mod@glib)
//!
//! ## Fileformat features
//!
//! * **png** - Reading and writing PNG images
//! * **pdf** - Rendering PDF documents
//! * **svg** - Rendering SVG documents
//! * **ps** - Rendering PostScript documents
//!
//! ## Cairo API version features
//!
//! * **v1_16** - Use Cairo 1.16 APIs
//!
//! ## Documentation features
//!
//! * **dox** - Used to keep system dependent items in documentation
//!
//! ## X Window features
//!
//! * **xcb** - X Window System rendering using the XCB library
//! * **xlib** - X Window System rendering using XLib
//!
//! ## Windows API features
//!
//! * **win32-surface** - Microsoft Windows surface support
#![cfg_attr(feature = "dox", feature(doc_cfg))]
#![allow(clippy::missing_safety_doc)]
pub use ffi;
#[cfg(feature = "freetype")]
pub use freetype_crate as freetype;
#[cfg(feature = "use_glib")]
pub use glib;
// Helper macro for our GValue related trait impls
#[cfg(feature = "use_glib")]
macro_rules! gvalue_impl {
($name:ty, $ffi_name:ty, $get_type:expr) => {
#[allow(unused_imports)]
use glib::translate::*;
impl glib::types::StaticType for $name {
fn static_type() -> glib::types::Type {
unsafe { from_glib($get_type()) }
}
}
impl glib::value::ValueType for $name {
type Type = Self;
}
unsafe impl<'a> glib::value::FromValue<'a> for $name {
type Checker = glib::value::GenericValueTypeOrNoneChecker<Self>;
unsafe fn from_value(value: &'a glib::Value) -> Self {
let ptr = glib::gobject_ffi::g_value_get_boxed(
glib::translate::ToGlibPtr::to_glib_none(value).0,
);
assert!(!ptr.is_null());
<$name as glib::translate::FromGlibPtrNone<*mut $ffi_name>>::from_glib_none(
ptr as *mut $ffi_name,
)
}
}
impl glib::value::ToValue for $name {
fn to_value(&self) -> glib::Value {
unsafe {
let mut value =
glib::Value::from_type(<$name as glib::StaticType>::static_type());
glib::gobject_ffi::g_value_set_boxed(
value.to_glib_none_mut().0,
self.to_glib_none().0 as *mut _,
);
value
}
}
fn value_type(&self) -> glib::Type {
<$name as glib::StaticType>::static_type()
}
}
impl glib::value::ToValueOptional for $name {
fn to_value_optional(s: Option<&Self>) -> glib::Value {
let mut value = glib::Value::for_value_type::<Self>();
unsafe {
glib::gobject_ffi::g_value_take_boxed(
value.to_glib_none_mut().0,
glib::translate::ToGlibPtr::to_glib_full(&s) as *mut _,
);
}
value
}
}
};
}
pub use crate::user_data::UserDataKey;
pub use crate::context::{Context, RectangleList};
pub use crate::paths::{Path, PathSegment, PathSegments};
pub use crate::device::Device;
pub use crate::enums::*;
pub use crate::error::{BorrowError, Error, IoError};
pub use crate::patterns::{
Gradient, LinearGradient, Mesh, Pattern, RadialGradient, SolidPattern, SurfacePattern,
};
pub use crate::font::{
FontExtents, FontFace, FontOptions, FontSlant, FontType, FontWeight, Glyph, ScaledFont,
TextCluster, TextExtents,
};
pub use crate::matrices::Matrix;
pub use crate::recording_surface::RecordingSurface;
pub use crate::rectangle::Rectangle;
pub use crate::rectangle_int::RectangleInt;
pub use crate::region::Region;
pub use crate::surface::{MappedImageSurface, Surface};
pub use crate::image_surface::{ImageSurface, ImageSurfaceData};
#[cfg(any(feature = "pdf", feature = "svg", feature = "ps", feature = "dox"))]
pub use stream::StreamWithError;
#[cfg(any(feature = "pdf", feature = "dox"))]
pub use pdf::PdfSurface;
#[cfg(any(feature = "ps", feature = "dox"))]
pub use ps::PsSurface;
#[cfg(any(feature = "svg", feature = "dox"))]
pub use svg::SvgSurface;
#[cfg(any(feature = "xcb", feature = "dox"))]
pub use xcb::{
XCBConnection, XCBDrawable, XCBPixmap, XCBRenderPictFormInfo, XCBScreen, XCBSurface,
XCBVisualType,
};
#[macro_use]
mod surface_macros;
#[macro_use]
mod user_data;
mod constants;
pub use crate::constants::*;
mod utils;
pub use crate::utils::{debug_reset_static_data, version_string, Version};
mod context;
mod device;
mod enums;
mod error;
mod font;
mod image_surface;
#[cfg(any(feature = "png", feature = "dox"))]
mod image_surface_png;
mod matrices;
mod paths;
mod patterns;
mod recording_surface;
mod rectangle;
mod rectangle_int;
mod region;
mod surface;
#[cfg(any(feature = "xcb", feature = "dox"))]
mod xcb;
#[cfg(any(feature = "pdf", feature = "svg", feature = "ps", feature = "dox"))]
#[macro_use]
mod stream;
#[cfg(any(feature = "pdf", feature = "dox"))]
mod pdf;
#[cfg(any(feature = "ps", feature = "dox"))]
mod ps;
#[cfg(any(feature = "svg", feature = "dox"))]
mod svg;
#[cfg(any(target_os = "macos", target_os = "ios", feature = "dox"))]
mod quartz_surface;
#[cfg(any(target_os = "macos", target_os = "ios", feature = "dox"))]
pub use quartz_surface::QuartzSurface;
#[cfg(any(all(windows, feature = "win32-surface"), feature = "dox"))]
mod win32_surface;
#[cfg(any(all(windows, feature = "win32-surface"), feature = "dox"))]
pub use win32_surface::Win32Surface;
#[cfg(not(feature = "use_glib"))]
mod borrowed {
    use std::mem;

    /// Wrapper around a value that represents borrowed C memory.
    ///
    /// Returned by `from_glib_borrow()`; guarantees that the wrapped value is
    /// never dropped when it goes out of scope.
    ///
    /// Borrowed values must never be passed by value or mutable reference to
    /// safe Rust code and must not leave the C scope in which they are valid.
    #[derive(Debug)]
    pub struct Borrowed<T>(mem::ManuallyDrop<T>);

    impl<T> Borrowed<T> {
        /// Wraps `val` as a borrowed value.
        pub fn new(val: T) -> Self {
            Borrowed(mem::ManuallyDrop::new(val))
        }

        /// Extracts the contained value.
        ///
        /// The returned value must never be dropped: pass it to `mem::forget()`
        /// or wrap it directly in `mem::ManuallyDrop` or another `Borrowed`.
        pub unsafe fn into_inner(self) -> T {
            let Borrowed(inner) = self;
            mem::ManuallyDrop::into_inner(inner)
        }
    }

    impl<T> std::ops::Deref for Borrowed<T> {
        type Target = T;

        fn deref(&self) -> &T {
            &*self.0
        }
    }

    impl<T> AsRef<T> for Borrowed<T> {
        fn as_ref(&self) -> &T {
            &*self.0
        }
    }
}
#[cfg(not(feature = "use_glib"))]
pub use borrowed::Borrowed;
#[cfg(feature = "use_glib")]
pub(crate) use glib::translate::Borrowed;
| 28.377863 | 103 | 0.597579 |
bbeae41b8bb95bb45344f12817cc766eb5f75dc7 | 363 | // Meta test for compiletest: check that when we give the right error
// patterns, the test passes. See all `revision-bad.rs`.
// run-fail
// revisions: foo bar
//[foo] error-pattern:foo
//[bar] error-pattern:bar
// ignore-emscripten no processes
// Compiled only in the `foo` revision; the panic message is what that
// revision's error-pattern expects to see in the output.
#[cfg(foo)]
fn die() {
    panic!("foo");
}

// Compiled only in the `bar` revision.
#[cfg(bar)]
fn die() {
    panic!("bar");
}

// run-fail entry point: always panics via the revision-selected `die`.
fn main() {
    die();
}
| 16.5 | 69 | 0.62259 |
fc39ce1376102c76261a3417d5abe11e578a3e6c | 1,434 | /// def omega2(dim, cap):
/// if dim == 1:
/// yield [cap]
/// else:
/// for x in range(cap + 1):
/// for prefix in omega2(dim - 1, cap - x):
/// yield [x] + prefix
/// Enumerates all `dim`-dimensional vectors of non-negative integers whose
/// components sum to exactly `cap` (the sequence described by the Python
/// generator in the comment above).
///
/// Usage: call `set_val` to read the current vector and `step` to advance,
/// until `done` returns true.
#[derive(Debug)]
pub struct Omega {
    dim: u64,
    cap: u64,
    // Value of the last component; also acts as the "exhausted" flag once it
    // exceeds `cap`.
    x: u64,
    // Recursive enumerator for the remaining `dim - 1` components
    // (`None` exactly when `dim == 1`).
    prefix: Option<Box<Omega>>,
}

impl Omega {
    /// Creates an enumerator positioned on the first vector.
    pub fn new(dim: u64, cap: u64) -> Omega {
        let prefix = match dim {
            1 => None,
            _ => Some(Box::new(Omega::new(dim - 1, cap))),
        };
        let omega = Omega { dim, cap, x: 0, prefix };
        assert!(!omega.done());
        omega
    }

    /// Advances to the next vector (or into the exhausted state).
    pub fn step(&mut self) {
        if self.dim == 1 {
            // A one-dimensional enumerator yields exactly one vector.
            self.x = self.cap + 1;
            return;
        }
        let prefix_done = {
            let prefix = self.prefix.as_mut().unwrap();
            prefix.step();
            prefix.done()
        };
        if prefix_done {
            self.x += 1;
            if self.x <= self.cap {
                // Restart the prefix with the remaining budget.
                self.prefix = Some(Box::new(Omega::new(self.dim - 1, self.cap - self.x)));
            }
        }
    }

    /// Writes the current vector into `tgt[0..dim]`.
    pub fn set_val(&self, tgt: &mut [u64]) {
        if self.dim == 1 {
            tgt[0] = self.cap;
        } else {
            self.prefix.as_ref().unwrap().set_val(tgt);
            tgt[(self.dim - 1) as usize] = self.x;
        }
    }

    /// Returns true once every vector has been produced.
    pub fn done(&self) -> bool {
        self.x > self.cap
    }
}
edb9bcf51d799f64238cda1038cabf2411a3d204 | 3,874 | use gimli;
use ir;
use traits;
use super::die_parse::DieItemsExtra;
use super::Parse;
/// Bundle of DWARF sections and ids needed by
/// `CompilationUnitHeader::parse_items`.
pub struct CompUnitItemsExtra<'input, R>
where
    R: 'input + gimli::Reader,
{
    /// Index of this compilation unit within the debug info.
    pub unit_id: usize,
    /// The `.debug_abbrev` section.
    pub debug_abbrev: gimli::DebugAbbrev<R>,
    /// The `.debug_str` section.
    pub debug_str: gimli::DebugStr<R>,
    /// Parsed DWARF range lists.
    pub rnglists: &'input gimli::RangeLists<R>,
}

/// Bundle of DWARF sections and ids needed by
/// `CompilationUnitHeader::parse_edges`.
pub struct CompUnitEdgesExtra<R>
where
    R: gimli::Reader,
{
    /// Index of this compilation unit within the debug info.
    pub unit_id: usize,
    /// The `.debug_abbrev` section.
    pub debug_abbrev: gimli::DebugAbbrev<R>,
}
impl<'input, R> Parse<'input> for gimli::CompilationUnitHeader<R, R::Offset>
where
    R: 'input + gimli::Reader,
{
    type ItemsExtra = CompUnitItemsExtra<'input, R>;

    /// Walks all debugging information entries (DIEs) of this compilation
    /// unit in depth-first order and parses an IR item from each one.
    fn parse_items(
        &self,
        items: &mut ir::ItemsBuilder,
        extra: Self::ItemsExtra,
    ) -> Result<(), traits::Error> {
        // Destructure the extra information needed to parse items in the unit.
        let Self::ItemsExtra {
            unit_id,
            debug_abbrev,
            debug_str,
            rnglists,
        } = extra;
        // Get the size of addresses in this type-unit, initialize an entry ID counter.
        let addr_size: u8 = self.address_size();
        let dwarf_version: u16 = self.version();
        let mut entry_id = 0;
        // Find the abbreviations associated with this compilation unit.
        // Use the abbreviations to create an entries cursor, and move it to the root.
        let abbrevs = self.abbreviations(&debug_abbrev)?;
        let mut die_cursor = self.entries(&abbrevs);
        // The first `next_dfs` call positions the cursor on the unit's root DIE;
        // `None` here means the unit is malformed/empty.
        if die_cursor.next_dfs()?.is_none() {
            let e = traits::Error::with_msg(
                "Unexpected error while traversing debugging information entries.",
            );
            return Err(e);
        }
        // Parse the contained debugging information entries in depth-first order.
        let mut depth = 0;
        while let Some((delta, entry)) = die_cursor.next_dfs()? {
            // Update depth value, and break out of the loop when we
            // return to the original starting position.
            depth += delta;
            if depth <= 0 {
                break;
            }
            let die_extra = DieItemsExtra {
                entry_id,
                unit_id,
                addr_size,
                dwarf_version,
                debug_str: &debug_str,
                rnglists,
            };
            entry.parse_items(items, die_extra)?;
            entry_id += 1;
        }
        Ok(())
    }

    type EdgesExtra = CompUnitEdgesExtra<R>;

    /// Second pass over the same DIEs: parses edges between the items that
    /// `parse_items` created. The traversal mirrors `parse_items` above.
    fn parse_edges(
        &self,
        items: &mut ir::ItemsBuilder,
        extra: Self::EdgesExtra,
    ) -> Result<(), traits::Error> {
        let Self::EdgesExtra {
            unit_id,
            debug_abbrev,
        } = extra;
        // Initialize an entry ID counter.
        let mut entry_id = 0;
        // Find the abbreviations associated with this compilation unit.
        // Use the abbreviations to create an entries cursor, and move it to the root.
        let abbrevs = self.abbreviations(&debug_abbrev)?;
        let mut die_cursor = self.entries(&abbrevs);
        if die_cursor.next_dfs()?.is_none() {
            let e = traits::Error::with_msg(
                "Unexpected error while traversing debugging information entries.",
            );
            return Err(e);
        }
        // Parse the contained debugging information entries in depth-first order.
        let mut depth = 0;
        while let Some((delta, entry)) = die_cursor.next_dfs()? {
            // Update depth value, and break out of the loop when we
            // return to the original starting position.
            depth += delta;
            if depth <= 0 {
                break;
            }
            // NOTE(review): computed but unused - presumably a placeholder for
            // wiring up per-entry edge ids; confirm before removing.
            let _ir_id = ir::Id::entry(unit_id, entry_id);
            entry.parse_edges(items, ())?;
            entry_id += 1;
        }
        Ok(())
    }
}
| 29.348485 | 87 | 0.561693 |
/// Runtime configuration options.
pub struct Config {
    /// Whether counting is enabled.
    pub with_count: bool,
}

impl Config {
    /// Builds a configuration from the given flags.
    pub fn new(with_count: bool) -> Self {
        Self { with_count }
    }

    /// Reports whether this configuration is usable.
    pub fn is_valid(&self) -> bool {
        // TODO: no validation rules exist yet, so every configuration is
        // currently considered valid.
        true
    }
}
034cf7c06c63358f009eec721549032acaa92e74 | 14,014 | #[cfg(test)]
#[path = "../../../tests/unit/format/problem/reader_test.rs"]
mod reader_test;
#[path = "./job_reader.rs"]
mod job_reader;
#[path = "./fleet_reader.rs"]
mod fleet_reader;
#[path = "./objective_reader.rs"]
mod objective_reader;
use self::fleet_reader::{create_transport_costs, read_fleet, read_travel_limits};
use self::job_reader::{read_jobs_with_extra_locks, read_locks};
use self::objective_reader::create_objective;
use crate::constraints::*;
use crate::extensions::{get_route_modifier, OnlyVehicleActivityCost};
use crate::format::coord_index::CoordIndex;
use crate::format::problem::{deserialize_matrix, deserialize_problem, Matrix};
use crate::format::*;
use crate::utils::get_approx_transportation;
use crate::validation::ValidationContext;
use crate::{get_unique_locations, parse_time};
use hashbrown::HashSet;
use std::cmp::Ordering::Equal;
use std::io::{BufReader, Read};
use std::sync::Arc;
use vrp_core::construction::constraints::*;
use vrp_core::models::common::{MultiDimLoad, SingleDimLoad, TimeWindow, ValueDimension};
use vrp_core::models::problem::{ActivityCost, Fleet, TransportCost};
use vrp_core::models::{Extras, Lock, Problem};
use vrp_core::utils::{compare_floats, DefaultRandom, Random};
pub type ApiProblem = crate::format::problem::Problem;
/// Reads a specific problem definition from various sources.
///
/// Implemented for combinations of problem/matrix inputs (buffered readers,
/// strings, already-deserialized API types), with or without routing matrices.
pub trait PragmaticProblem {
    /// Reads a problem defined in pragmatic format, consuming the source.
    ///
    /// # Errors
    /// Returns all format/validation errors discovered while reading.
    fn read_pragmatic(self) -> Result<Problem, Vec<FormatError>>;
}
impl<R: Read> PragmaticProblem for (BufReader<R>, Vec<BufReader<R>>) {
fn read_pragmatic(self) -> Result<Problem, Vec<FormatError>> {
let problem = deserialize_problem(self.0)?;
let mut matrices = vec![];
for matrix in self.1 {
matrices.push(deserialize_matrix(matrix)?);
}
map_to_problem_with_matrices(problem, matrices)
}
}
/// Reads a problem without routing matrices; transport data is approximated.
impl<R: Read> PragmaticProblem for BufReader<R> {
    fn read_pragmatic(self) -> Result<Problem, Vec<FormatError>> {
        let problem = deserialize_problem(self)?;
        // No matrices supplied: fall back to approximated transport costs.
        map_to_problem_with_approx(problem)
    }
}
/// Reads a problem plus real routing matrices from in-memory json strings.
impl PragmaticProblem for (String, Vec<String>) {
    fn read_pragmatic(self) -> Result<Problem, Vec<FormatError>> {
        let (problem_str, matrix_strs) = self;
        let problem = deserialize_problem(BufReader::new(problem_str.as_bytes()))?;
        // Deserialize every matrix string, short-circuiting on the first failure.
        let matrices = matrix_strs
            .iter()
            .map(|matrix| deserialize_matrix(BufReader::new(matrix.as_bytes())))
            .collect::<Result<Vec<_>, _>>()?;
        map_to_problem_with_matrices(problem, matrices)
    }
}
/// Reads a problem from an in-memory json string; transport data is approximated.
impl PragmaticProblem for String {
    fn read_pragmatic(self) -> Result<Problem, Vec<FormatError>> {
        let problem = deserialize_problem(BufReader::new(self.as_bytes()))?;
        map_to_problem_with_approx(problem)
    }
}
/// Maps an already-deserialized API problem plus matrices to the core model.
impl PragmaticProblem for (ApiProblem, Vec<Matrix>) {
    fn read_pragmatic(self) -> Result<Problem, Vec<FormatError>> {
        map_to_problem_with_matrices(self.0, self.1)
    }
}
/// Maps an already-deserialized API problem to the core model using
/// approximated transport data.
impl PragmaticProblem for ApiProblem {
    fn read_pragmatic(self) -> Result<Problem, Vec<FormatError>> {
        map_to_problem_with_approx(self)
    }
}
/// Dispatches to the matrix-based or approximated reader depending on whether
/// matrices were supplied.
impl PragmaticProblem for (ApiProblem, Option<Vec<Matrix>>) {
    fn read_pragmatic(self) -> Result<Problem, Vec<FormatError>> {
        let (problem, maybe_matrices) = self;
        match maybe_matrices {
            Some(matrices) => (problem, matrices).read_pragmatic(),
            None => problem.read_pragmatic(),
        }
    }
}
/// Flags and aggregates derived from the API problem, used to decide which
/// constraint modules and objectives to enable.
pub struct ProblemProperties {
    /// True when a vehicle capacity or a job demand has more than one dimension.
    has_multi_dimen_capacity: bool,
    /// True when any vehicle shift defines breaks.
    has_breaks: bool,
    /// True when any job requires skills.
    has_skills: bool,
    /// True when any matrix reports routing error codes.
    has_unreachable_locations: bool,
    /// True when any vehicle shift defines dispatch points.
    has_dispatch: bool,
    /// True when any vehicle shift defines reload points.
    has_reloads: bool,
    /// True when any job has priority greater than one.
    has_priorities: bool,
    /// True when any vehicle limits allowed areas.
    has_area_limits: bool,
    /// True when any vehicle limits its tour size.
    has_tour_size_limits: bool,
    /// Maximum positive job value, if any job defines one.
    max_job_value: Option<f64>,
}
/// Creates approximated travel matrices (one per profile) when real routing
/// data is not supplied.
///
/// Durations and distances are derived from a straight-line approximation
/// using each profile's speed, falling back to `DEFAULT_SPEED` when unset.
fn create_approx_matrices(problem: &ApiProblem) -> Vec<Matrix> {
    const DEFAULT_SPEED: f64 = 10.;
    // Collect each distinct speed value once. Bit patterns are used so that
    // f64 values can live in a HashSet (f64 itself is not Eq/Hash).
    let speeds = problem
        .fleet
        .profiles
        .iter()
        // NOTE: `Option<f64>` is Copy, so no clone is needed here.
        .map(|profile| profile.speed.unwrap_or(DEFAULT_SPEED))
        .map(|speed| speed.to_bits())
        .collect::<HashSet<u64>>();
    let speeds = speeds.into_iter().map(f64::from_bits).collect::<Vec<_>>();

    // `problem` is already a reference, no extra borrow required.
    let locations = get_unique_locations(problem);
    // One (travel_times, distances) pair per distinct speed.
    let approx_data = get_approx_transportation(&locations, speeds.as_slice());

    problem
        .fleet
        .profiles
        .iter()
        .map(move |profile| {
            let speed = profile.speed.unwrap_or(DEFAULT_SPEED);
            // Map the profile's speed back to its slot in `approx_data`.
            let idx =
                speeds.iter().position(|s| compare_floats(*s, speed) == Equal).expect("Cannot find profile speed");

            Matrix {
                profile: Some(profile.name.clone()),
                timestamp: None,
                travel_times: approx_data[idx].0.clone(),
                distances: approx_data[idx].1.clone(),
                error_codes: None,
            }
        })
        .collect()
}
/// Maps an API problem to the core problem model, generating approximated
/// matrices when possible.
fn map_to_problem_with_approx(problem: ApiProblem) -> Result<Problem, Vec<FormatError>> {
    let coord_index = CoordIndex::new(&problem);
    // NOTE(review): `.1` of `get_used_types()` presumably flags that matrix
    // index locations are in use (no coordinates to approximate from) —
    // confirm against CoordIndex. In that case empty matrices are passed on.
    let matrices = if coord_index.get_used_types().1 { vec![] } else { create_approx_matrices(&problem) };
    map_to_problem(problem, matrices, coord_index)
}
/// Maps an API problem plus caller-provided routing matrices to the core
/// problem model.
fn map_to_problem_with_matrices(problem: ApiProblem, matrices: Vec<Matrix>) -> Result<Problem, Vec<FormatError>> {
    let coord_index = CoordIndex::new(&problem);
    map_to_problem(problem, matrices, coord_index)
}
/// Builds the core `Problem` from an API problem, routing matrices, and the
/// coordinate index.
///
/// Validates input first, then wires up fleet, jobs, locks, travel limits,
/// the constraint pipeline, the objective, and extras.
fn map_to_problem(
    api_problem: ApiProblem,
    matrices: Vec<Matrix>,
    coord_index: CoordIndex,
) -> Result<Problem, Vec<FormatError>> {
    // Reject invalid input before doing any expensive construction work.
    ValidationContext::new(&api_problem, Some(&matrices)).validate()?;

    let problem_props = get_problem_properties(&api_problem, &matrices);

    let coord_index = Arc::new(coord_index);

    let transport = create_transport_costs(&api_problem, &matrices).map_err(|err| {
        vec![FormatError::new(
            "E0002".to_string(),
            "cannot create transport costs".to_string(),
            format!("Check matrix routing data: '{}'", err),
        )]
    })?;
    let activity = Arc::new(OnlyVehicleActivityCost::default());
    let fleet = read_fleet(&api_problem, &problem_props, &coord_index);

    // TODO pass random from outside as there might be need to have it initialized with seed
    // at the moment, this random instance is used only by multi job permutation generator
    let random: Arc<dyn Random + Send + Sync> = Arc::new(DefaultRandom::default());

    // `job_index` is populated by `read_jobs_with_extra_locks` and reused for
    // lock resolution and extras below.
    let mut job_index = Default::default();
    let (jobs, locks) = read_jobs_with_extra_locks(
        &api_problem,
        &problem_props,
        &coord_index,
        &fleet,
        &transport,
        &mut job_index,
        &random,
    );
    // Combine job-derived locks with explicitly defined relation locks.
    let locks = locks.into_iter().chain(read_locks(&api_problem, &job_index).into_iter()).collect::<Vec<_>>();
    // Default travel limit: unlimited distance and duration.
    let limits = read_travel_limits(&api_problem).unwrap_or_else(|| Arc::new(|_| (None, None)));
    let mut constraint = create_constraint_pipeline(
        coord_index.clone(),
        &fleet,
        transport.clone(),
        activity.clone(),
        &problem_props,
        &locks,
        limits,
    );
    // Creating the objective may add extra modules, hence the mutable borrow.
    let objective = create_objective(&api_problem, &mut constraint, &problem_props);
    let constraint = Arc::new(constraint);
    let extras = Arc::new(create_extras(constraint.clone(), &problem_props, job_index, coord_index));

    Ok(Problem {
        fleet: Arc::new(fleet),
        jobs: Arc::new(jobs),
        locks,
        constraint,
        activity,
        transport,
        objective,
        extras,
    })
}
/// Assembles the constraint pipeline, enabling only the modules required by
/// the detected problem properties.
fn create_constraint_pipeline(
    coord_index: Arc<CoordIndex>,
    fleet: &Fleet,
    transport: Arc<dyn TransportCost + Send + Sync>,
    activity: Arc<dyn ActivityCost + Send + Sync>,
    props: &ProblemProperties,
    locks: &[Arc<Lock>],
    limits: TravelLimitFunc,
) -> ConstraintPipeline {
    let mut constraint = ConstraintPipeline::default();

    // Transport/time constraints are always present.
    constraint.add_module(Box::new(TransportConstraintModule::new(
        transport.clone(),
        activity.clone(),
        limits,
        TIME_CONSTRAINT_CODE,
        DISTANCE_LIMIT_CONSTRAINT_CODE,
        DURATION_LIMIT_CONSTRAINT_CODE,
    )));

    // Capacity handling is always added; its flavor depends on props.
    add_capacity_module(&mut constraint, &props, transport.clone());

    // The remaining modules are opt-in based on problem features.
    if props.has_breaks {
        constraint.add_module(Box::new(BreakModule::new(transport.clone(), BREAK_CONSTRAINT_CODE)));
    }

    if props.has_skills {
        constraint.add_module(Box::new(SkillsModule::new(SKILL_CONSTRAINT_CODE)));
    }

    if props.has_dispatch {
        constraint.add_module(Box::new(DispatchModule::new(DISPATCH_CONSTRAINT_CODE)));
    }

    if props.has_priorities {
        constraint.add_module(Box::new(PriorityModule::new(PRIORITY_CONSTRAINT_CODE)));
    }

    if !locks.is_empty() {
        constraint.add_module(Box::new(StrictLockingModule::new(fleet, locks, LOCKING_CONSTRAINT_CODE)));
    }

    if props.has_unreachable_locations {
        constraint.add_module(Box::new(ReachableModule::new(transport.clone(), REACHABLE_CONSTRAINT_CODE)));
    }

    if props.has_tour_size_limits {
        add_tour_size_module(&mut constraint)
    }

    if props.has_area_limits {
        add_area_module(&mut constraint, coord_index);
    }

    constraint
}
/// Adds the appropriate capacity constraint module: multi- vs single-dimension
/// load, with or without reload (multi-trip) support.
fn add_capacity_module(
    constraint: &mut ConstraintPipeline,
    props: &ProblemProperties,
    transport: Arc<dyn TransportCost + Send + Sync>,
) {
    constraint.add_module(if props.has_reloads {
        // NOTE(review): reload is triggered when load reaches 90% of capacity;
        // the origin of this 0.9 threshold is not documented here — confirm.
        let threshold = 0.9;

        if props.has_multi_dimen_capacity {
            Box::new(CapacityConstraintModule::<MultiDimLoad>::new_with_multi_trip(
                transport,
                CAPACITY_CONSTRAINT_CODE,
                Arc::new(ReloadMultiTrip::new(Box::new(move |capacity| *capacity * threshold))),
            ))
        } else {
            Box::new(CapacityConstraintModule::<SingleDimLoad>::new_with_multi_trip(
                transport,
                CAPACITY_CONSTRAINT_CODE,
                Arc::new(ReloadMultiTrip::new(Box::new(move |capacity| *capacity * threshold))),
            ))
        }
    } else if props.has_multi_dimen_capacity {
        Box::new(CapacityConstraintModule::<MultiDimLoad>::new(transport, CAPACITY_CONSTRAINT_CODE))
    } else {
        Box::new(CapacityConstraintModule::<SingleDimLoad>::new(transport, CAPACITY_CONSTRAINT_CODE))
    });
}
/// Adds the allowed-area constraint module, resolving location indices back to
/// lat/lng coordinates via the coordinate index.
fn add_area_module(constraint: &mut ConstraintPipeline, coord_index: Arc<CoordIndex>) {
    constraint.add_module(Box::new(AreaModule::new(
        // Areas are stored on the vehicle's dimensions under the "areas" key.
        Arc::new(|actor| actor.vehicle.dimens.get_value::<Vec<Area>>("areas")),
        Arc::new(move |location| {
            // A missing index would indicate an internal inconsistency, hence the panic.
            coord_index
                .get_by_idx(location)
                .map_or_else(|| panic!("cannot find location!"), |location| location.to_lat_lng())
        }),
        AREA_CONSTRAINT_CODE,
    )));
}
/// Adds the tour-size limit module; the limit is stored on the vehicle's
/// dimensions under the "tour_size" key.
fn add_tour_size_module(constraint: &mut ConstraintPipeline) {
    constraint.add_module(Box::new(TourSizeModule::new(
        Arc::new(|actor| actor.vehicle.dimens.get_value::<usize>("tour_size").cloned()),
        TOUR_SIZE_CONSTRAINT_CODE,
    )));
}
/// Builds the `Extras` map exposed alongside the problem: capacity type tag,
/// coordinate index, job index, and (when dispatch is used) a route modifier.
fn create_extras(
    constraint: Arc<ConstraintPipeline>,
    props: &ProblemProperties,
    job_index: JobIndex,
    coord_index: Arc<CoordIndex>,
) -> Extras {
    let mut extras = Extras::default();
    extras.insert(
        "capacity_type".to_string(),
        Arc::new((if props.has_multi_dimen_capacity { "multi" } else { "single" }).to_string()),
    );
    extras.insert("coord_index".to_owned(), coord_index);
    // The clone is required: `job_index` may also be moved into the route
    // modifier below.
    extras.insert("job_index".to_owned(), Arc::new(job_index.clone()));

    if props.has_dispatch {
        extras.insert("route_modifier".to_owned(), Arc::new(get_route_modifier(constraint, job_index)));
    }

    extras
}
/// Parses a `[start, end]` pair of timestamp strings into a `TimeWindow`.
/// Panics when the slice does not contain exactly two entries.
fn parse_time_window(tw: &[String]) -> TimeWindow {
    assert_eq!(tw.len(), 2);
    let start = parse_time(&tw[0]);
    let end = parse_time(&tw[1]);
    TimeWindow::new(start, end)
}
/// Scans the API problem and matrices once to detect which optional features
/// are in use; the result drives constraint/objective selection.
fn get_problem_properties(api_problem: &ApiProblem, matrices: &[Matrix]) -> ProblemProperties {
    // Any matrix carrying error codes implies some locations may be unreachable.
    let has_unreachable_locations = matrices.iter().any(|m| m.error_codes.is_some());
    // Multi-dimensional capacity can come from vehicles or from job demands.
    let has_multi_dimen_capacity = api_problem.fleet.vehicles.iter().any(|t| t.capacity.len() > 1)
        || api_problem.plan.jobs.iter().any(|job| {
            job.pickups
                .iter()
                .chain(job.deliveries.iter())
                .flat_map(|tasks| tasks.iter())
                .any(|task| task.demand.as_ref().map_or(false, |d| d.len() > 1))
        });
    let has_breaks = api_problem
        .fleet
        .vehicles
        .iter()
        .flat_map(|t| &t.shifts)
        .any(|shift| shift.breaks.as_ref().map_or(false, |b| !b.is_empty()));

    let has_skills = api_problem.plan.jobs.iter().any(|job| job.skills.is_some());
    // Only positive values are considered; jobs without value are ignored.
    let max_job_value = api_problem
        .plan
        .jobs
        .iter()
        .filter_map(|job| job.value)
        .filter(|value| *value > 0.)
        .max_by(|a, b| compare_floats(*a, *b));

    let has_dispatch = api_problem
        .fleet
        .vehicles
        .iter()
        .any(|t| t.shifts.iter().any(|s| s.dispatch.as_ref().map_or(false, |dispatch| !dispatch.is_empty())));
    let has_reloads = api_problem
        .fleet
        .vehicles
        .iter()
        .any(|t| t.shifts.iter().any(|s| s.reloads.as_ref().map_or(false, |reloads| !reloads.is_empty())));

    // Priority 1 is the default, so only values above it count as a feature.
    let has_priorities = api_problem.plan.jobs.iter().filter_map(|job| job.priority).any(|priority| priority > 1);

    let has_area_limits = api_problem
        .fleet
        .vehicles
        .iter()
        .any(|v| v.limits.as_ref().and_then(|l| l.allowed_areas.as_ref()).map_or(false, |a| !a.is_empty()));

    let has_tour_size_limits =
        api_problem.fleet.vehicles.iter().any(|v| v.limits.as_ref().map_or(false, |l| l.tour_size.is_some()));

    ProblemProperties {
        has_multi_dimen_capacity,
        has_breaks,
        has_skills,
        has_unreachable_locations,
        has_dispatch,
        has_reloads,
        has_priorities,
        has_area_limits,
        has_tour_size_limits,
        max_job_value,
    }
}
| 33.850242 | 115 | 0.650706 |
1c401e8976d062d06145e351801bc08dd4e79c50 | 26,936 | // Copyright (c) 2018-2020 MobileCoin Inc.
//! Serves node-to-node gRPC requests.
use crate::{
api::peer_service_error::PeerServiceError,
background_work_queue::BackgroundWorkQueueSenderFn,
consensus_service::{IncomingConsensusMsg, ProposeTxCallback},
counters,
tx_manager::{TxManager, TxManagerError},
};
use grpcio::{RpcContext, RpcStatus, UnarySink};
use mc_attest_api::attest::Message;
use mc_attest_enclave_api::{EnclaveMessage, PeerSession};
use mc_common::{
logger::{log, Logger},
ResponderId,
};
use mc_consensus_api::{
consensus_common::ProposeTxResponse,
consensus_peer::{
ConsensusMsg as GrpcConsensusMsg, ConsensusMsgResponse, ConsensusMsgResult,
GetLatestMsgResponse, GetTxsRequest, GetTxsResponse, TxHashesNotInCache,
},
consensus_peer_grpc::ConsensusPeerApi,
empty::Empty,
};
use mc_consensus_enclave::ConsensusEnclave;
use mc_ledger_db::Ledger;
use mc_peers::TxProposeAAD;
use mc_transaction_core::tx::TxHash;
use mc_util_grpc::{
rpc_enclave_err, rpc_internal_error, rpc_invalid_arg_error, rpc_logger, send_result,
};
use mc_util_metrics::SVC_COUNTERS;
use mc_util_serial::deserialize;
use std::{
convert::{TryFrom, TryInto},
str::FromStr,
sync::Arc,
};
// Callback returning the latest SCP message issued by the local node, or None
// when it has not issued one yet; used to implement the `fetch_latest_msg` RPC.
type FetchLatestMsgFn = Arc<dyn Fn() -> Option<mc_peers::ConsensusMsg> + Sync + Send>;
/// Serves node-to-node consensus gRPC requests (tx propose, consensus
/// messages, tx resolution, and latest-message queries).
#[derive(Clone)]
pub struct PeerApiService {
    /// Enclave instance.
    consensus_enclave: Arc<dyn ConsensusEnclave + Send + Sync>,

    /// TxManager instance.
    tx_manager: Arc<dyn TxManager + Send + Sync>,

    /// Callback function for feeding consensus messages into ByzantineLedger.
    incoming_consensus_msgs_sender: BackgroundWorkQueueSenderFn<IncomingConsensusMsg>,

    /// Callback function for feeding transactions into ByzantineLedger.
    scp_client_value_sender: ProposeTxCallback,

    /// Ledger database.
    ledger: Arc<dyn Ledger + Send + Sync>,

    /// Callback function for getting the latest SCP statement the local node has issued.
    fetch_latest_msg_fn: FetchLatestMsgFn,

    /// List of recognized responder IDs to accept messages from.
    ///
    /// We only want to accept messages from peers we can initiate outgoing requests to. That is
    /// necessary for resolving TxHashes into Txs. If we received a consensus message from a peer
    /// not on this list, we won't be able to reach out to it to ask for the transaction contents.
    known_responder_ids: Vec<ResponderId>,

    /// Logger.
    logger: Logger,
}
impl PeerApiService {
    /// Creates a PeerApiService.
    ///
    /// # Arguments
    /// * `consensus_enclave` - The local node's consensus enclave.
    /// * `ledger` - The local node's ledger.
    /// * `tx_manager` - The local node's TxManager.
    /// * `incoming_consensus_msgs_sender` - Callback for a new consensus message from a peer.
    /// * `scp_client_value_sender` - Callback for proposed transactions.
    /// * `fetch_latest_msg_fn` - Returns highest message emitted by this node.
    /// * `known_responder_ids` - Messages from peers not on this "whitelist" are ignored.
    /// * `logger` - Logger.
    pub fn new(
        consensus_enclave: Arc<dyn ConsensusEnclave + Send + Sync>,
        ledger: Arc<dyn Ledger + Send + Sync>,
        tx_manager: Arc<dyn TxManager + Send + Sync>,
        incoming_consensus_msgs_sender: BackgroundWorkQueueSenderFn<IncomingConsensusMsg>,
        scp_client_value_sender: ProposeTxCallback,
        fetch_latest_msg_fn: FetchLatestMsgFn,
        known_responder_ids: Vec<ResponderId>,
        logger: Logger,
    ) -> Self {
        Self {
            consensus_enclave,
            ledger,
            tx_manager,
            incoming_consensus_msgs_sender,
            scp_client_value_sender,
            fetch_latest_msg_fn,
            known_responder_ids,
            logger,
        }
    }
    /// Handle transactions proposed by clients to a different node.
    ///
    /// # Arguments
    /// * `enclave_msg` - A message encrypted for this node's consensus enclave.
    /// * `logger` - Logger scoped to the current RPC.
    ///
    /// # Returns
    /// The number of blocks in the local ledger when the tx_propose request was handled.
    fn handle_tx_propose(
        &mut self,
        enclave_msg: EnclaveMessage<PeerSession>,
        logger: &Logger,
    ) -> Result<u64, PeerServiceError> {
        // Copy the AAD before the message is consumed by the enclave call below.
        let aad = enclave_msg.aad.clone();
        let tx_contexts = self
            .consensus_enclave
            .peer_tx_propose(enclave_msg)
            .map_err(PeerServiceError::Enclave)?;

        // The node that originally received the transaction from a client,
        // and the node that forwarded the transaction if not the origin_node.
        // An undecodable AAD yields (None, None) rather than an error.
        let (origin_node, relayed_by) = {
            mc_util_serial::deserialize::<TxProposeAAD>(&aad)
                .map(|aad| (Some(aad.origin_node), Some(aad.relayed_by)))
                .unwrap_or((None, None))
        };

        // The number of blocks in the local ledger when the tx_propose request was handled.
        let num_blocks = self.ledger.num_blocks().map_err(|e| {
            log::warn!(logger, "{}", e);
            PeerServiceError::InternalError
        })?;

        // Handle each transaction. Per-transaction failures are logged/counted
        // but do not fail the whole request.
        for tx_context in tx_contexts {
            let tx_hash = tx_context.tx_hash;

            match self.tx_manager.insert(tx_context) {
                Ok(tx_hash) => {
                    // Submit for consideration in next SCP slot.
                    (*self.scp_client_value_sender)(
                        tx_hash,
                        origin_node.as_ref(),
                        relayed_by.as_ref(),
                    );
                }

                Err(TxManagerError::TransactionValidation(err)) => {
                    log::debug!(
                        logger,
                        "Error validating transaction {tx_hash}: {err}",
                        tx_hash = tx_hash.to_string(),
                        err = format!("{:?}", err)
                    );
                    counters::TX_VALIDATION_ERROR_COUNTER.inc(&format!("{:?}", err));
                }

                Err(err) => {
                    log::info!(
                        logger,
                        "tx_propose failed for {tx_hash}: {err}",
                        tx_hash = tx_hash.to_string(),
                        err = format!("{:?}", err)
                    );
                }
            };
        }

        Ok(num_blocks)
    }
    /// Handle a consensus message from another node.
    ///
    /// # Errors
    /// * `UnknownPeer` when the sender is not in `known_responder_ids`.
    /// * `ConsensusMsgInvalidSignature` when signature verification fails.
    /// * `InternalError` when the message cannot be enqueued.
    fn handle_consensus_msg(
        &mut self,
        consensus_msg: mc_peers::ConsensusMsg,
        from_responder_id: ResponderId,
    ) -> Result<(), PeerServiceError> {
        // Ignore a consensus message from an unknown peer.
        if !self.known_responder_ids.contains(&from_responder_id) {
            return Err(PeerServiceError::UnknownPeer(from_responder_id.to_string()));
        }

        // A consensus message with a valid signature.
        let verified_consensus_msg: mc_peers::VerifiedConsensusMsg = consensus_msg
            .try_into()
            .map_err(|_| PeerServiceError::ConsensusMsgInvalidSignature)?;

        // Hand the verified message off to ByzantineLedger's work queue.
        (self.incoming_consensus_msgs_sender)(IncomingConsensusMsg {
            from_responder_id,
            consensus_msg: verified_consensus_msg,
        })
        .map_err(|_| PeerServiceError::InternalError)
    }
    /// Returns the full, encrypted transactions corresponding to a list of transaction hashes.
    ///
    /// # Errors
    /// * `UnknownTransactions` (carrying the missing hashes) when some hashes
    ///   are not in the tx cache.
    /// * `InternalError` for any other tx manager failure.
    fn handle_get_txs(
        &mut self,
        tx_hashes: Vec<TxHash>,
        peer_session: PeerSession,
        logger: &Logger,
    ) -> Result<EnclaveMessage<PeerSession>, PeerServiceError> {
        self.tx_manager
            .encrypt_for_peer(&tx_hashes, &[], &peer_session)
            .map_err(|tx_manager_error| match tx_manager_error {
                TxManagerError::NotInCache(tx_hashes) => {
                    PeerServiceError::UnknownTransactions(tx_hashes)
                }
                err => {
                    log::warn!(logger, "{}", err);
                    PeerServiceError::InternalError
                }
            })
    }
}
impl ConsensusPeerApi for PeerApiService {
    /// Handle transactions proposed by clients to a different node.
    ///
    /// gRPC entry point: decodes the attested message, delegates to
    /// `handle_tx_propose`, and maps service errors to RPC statuses.
    fn peer_tx_propose(
        &mut self,
        ctx: RpcContext,
        request: Message,
        sink: UnarySink<ProposeTxResponse>,
    ) {
        let _timer = SVC_COUNTERS.req(&ctx);
        let enclave_msg: EnclaveMessage<PeerSession> = request.into();
        mc_common::logger::scoped_global_logger(&rpc_logger(&ctx, &self.logger), |logger| {
            let result: Result<ProposeTxResponse, RpcStatus> =
                match self.handle_tx_propose(enclave_msg, logger) {
                    Ok(num_blocks) => {
                        let mut response = ProposeTxResponse::new();
                        response.set_num_blocks(num_blocks);
                        Ok(response)
                    }

                    // Enclave errors get a dedicated status; anything else is internal.
                    Err(peer_service_error) => match peer_service_error {
                        PeerServiceError::Enclave(err) => Err(rpc_enclave_err(err, &logger)),
                        err => Err(rpc_internal_error("peer_tx_propose", err, &logger)),
                    },
                };

            send_result(ctx, sink, result, &logger)
        });
    }
    /// Handle a consensus message from another peer.
    ///
    /// gRPC entry point: parses the sender and payload, delegates to
    /// `handle_consensus_msg`. Unknown peers get an `UnknownPeer` result (not
    /// an RPC error); malformed payloads/signatures map to invalid-arg errors.
    fn send_consensus_msg(
        &mut self,
        ctx: RpcContext,
        request: GrpcConsensusMsg,
        sink: UnarySink<ConsensusMsgResponse>,
    ) {
        let _timer = SVC_COUNTERS.req(&ctx);
        mc_common::logger::scoped_global_logger(&rpc_logger(&ctx, &self.logger), |logger| {
            // The peer who delivered this message to us.
            let from_responder_id = match ResponderId::from_str(request.get_from_responder_id()) {
                Ok(responder_id) => responder_id,
                Err(_) => {
                    let result = Err(rpc_invalid_arg_error(
                        "send_consensus_msg",
                        "from_responder_id",
                        &logger,
                    ));
                    send_result(ctx, sink, result, &logger);
                    return;
                }
            };

            let consensus_msg: mc_peers::ConsensusMsg = match deserialize(request.get_payload()) {
                Ok(consensus_msg) => consensus_msg,
                Err(_) => {
                    let result = Err(rpc_invalid_arg_error(
                        "send_consensus_msg",
                        "consensus_msg",
                        &logger,
                    ));
                    send_result(ctx, sink, result, &logger);
                    return;
                }
            };

            let result: Result<ConsensusMsgResponse, RpcStatus> = match self
                .handle_consensus_msg(consensus_msg, from_responder_id)
            {
                Ok(()) => {
                    let mut response = ConsensusMsgResponse::new();
                    response.set_result(ConsensusMsgResult::Ok);
                    Ok(response)
                }
                // An unknown peer is reported in-band so the caller can react.
                Err(PeerServiceError::UnknownPeer(_)) => {
                    let mut response = ConsensusMsgResponse::new();
                    response.set_result(ConsensusMsgResult::UnknownPeer);
                    Ok(response)
                }
                Err(PeerServiceError::ConsensusMsgInvalidSignature) => Err(rpc_invalid_arg_error(
                    "send_consensus_msg",
                    "InvalidConsensusMsgSignature",
                    &logger,
                )),
                Err(_) => Err(rpc_internal_error(
                    "send_consensus_msg",
                    "InternalError",
                    &logger,
                )),
            };
            send_result(ctx, sink, result, &logger);
        });
    }
    /// Returns the highest consensus message issued by this node.
    ///
    /// The response payload is empty when the node has not issued any message.
    fn get_latest_msg(
        &mut self,
        ctx: RpcContext,
        _request: Empty,
        sink: UnarySink<GetLatestMsgResponse>,
    ) {
        let _timer = SVC_COUNTERS.req(&ctx);
        mc_common::logger::scoped_global_logger(&rpc_logger(&ctx, &self.logger), |logger| {
            let mut response = GetLatestMsgResponse::new();

            if let Some(latest_msg) = (self.fetch_latest_msg_fn)() {
                // Serialization failure here would be a bug, hence the expect.
                let serialized_msg = mc_util_serial::serialize(&latest_msg)
                    .expect("Failed serializing consensus msg");
                response.set_payload(serialized_msg);
            }
            send_result(ctx, sink, Ok(response), &logger);
        });
    }
    /// Returns the full, encrypted transactions corresponding to a list of transaction hashes.
    ///
    /// Unknown hashes are reported in-band via `tx_hashes_not_in_cache` so the
    /// caller can fetch them elsewhere; malformed hashes are an invalid-arg error.
    fn get_txs(
        &mut self,
        ctx: RpcContext,
        request: GetTxsRequest,
        sink: UnarySink<GetTxsResponse>,
    ) {
        let _timer = SVC_COUNTERS.req(&ctx);
        mc_common::logger::scoped_global_logger(&rpc_logger(&ctx, &self.logger), |logger| {
            let mut tx_hashes: Vec<TxHash> = Vec::new();
            for tx_hash_bytes in request.get_tx_hashes() {
                match TxHash::try_from(&tx_hash_bytes[..]) {
                    Ok(tx_hash) => tx_hashes.push(tx_hash),
                    Err(_) => {
                        let result = Err(rpc_invalid_arg_error("tx_hash", (), &logger));
                        send_result(ctx, sink, result, &logger);
                        return;
                    }
                }
            }

            let peer_session = PeerSession::from(request.get_channel_id());
            let result: Result<GetTxsResponse, RpcStatus> =
                match self.handle_get_txs(tx_hashes, peer_session, &logger) {
                    Ok(enclave_message) => {
                        let mut response = GetTxsResponse::new();
                        response.set_success(enclave_message.into());
                        Ok(response)
                    }

                    Err(PeerServiceError::UnknownTransactions(tx_hashes)) => {
                        let mut tx_hashes_not_in_cache = TxHashesNotInCache::new();
                        tx_hashes_not_in_cache.set_tx_hashes(
                            tx_hashes.iter().map(|tx_hash| tx_hash.to_vec()).collect(),
                        );

                        let mut response = GetTxsResponse::new();
                        response.set_tx_hashes_not_in_cache(tx_hashes_not_in_cache);
                        Ok(response)
                    }

                    // Unexpected errors:
                    Err(err) => Err(rpc_internal_error("get_txs", err, &logger)),
                };

            send_result(ctx, sink, result, &logger)
        });
    }
}
#[cfg(test)]
mod tests {
use crate::{
api::peer_api_service::PeerApiService, background_work_queue::BackgroundWorkQueueError,
consensus_service::IncomingConsensusMsg, tx_manager::MockTxManager,
};
use grpcio::{ChannelBuilder, Environment, Error::RpcFailure, Server, ServerBuilder};
use mc_common::{
logger::{test_with_logger, Logger},
NodeID, ResponderId,
};
use mc_consensus_api::{
consensus_peer::{ConsensusMsg, ConsensusMsgResult},
consensus_peer_grpc,
consensus_peer_grpc::ConsensusPeerApiClient,
};
use mc_consensus_enclave_mock::MockConsensusEnclave;
use mc_consensus_scp::{
msg::{NominatePayload, Topic::Nominate},
Msg, QuorumSet,
};
use mc_crypto_keys::{Ed25519Pair, Ed25519Private};
use mc_ledger_db::MockLedger;
use mc_peers;
use mc_transaction_core::{tx::TxHash, Block};
use mc_util_from_random::FromRandom;
use rand::{rngs::StdRng, SeedableRng};
use std::sync::Arc;
// Get sensibly-initialized mocks.
fn get_mocks() -> (MockConsensusEnclave, MockLedger, MockTxManager) {
let consensus_enclave = MockConsensusEnclave::new();
let ledger = MockLedger::new();
let tx_manager = MockTxManager::new();
(consensus_enclave, ledger, tx_manager)
}
    /// Returns a consensus-message sender callback that always succeeds,
    /// discarding the message.
    fn get_incoming_consensus_msgs_sender_ok(
    ) -> Arc<dyn Fn(IncomingConsensusMsg) -> Result<(), BackgroundWorkQueueError> + Sync + Send>
    {
        Arc::new(|_msg: IncomingConsensusMsg| {
            // TODO: store inputs for inspection.
            Ok(())
        })
    }
    // Returns a no-op callback for proposed transaction hashes.
    fn get_scp_client_value_sender(
    ) -> Arc<dyn Fn(TxHash, Option<&NodeID>, Option<&ResponderId>) + Sync + Send> {
        Arc::new(
            |_tx_hash: TxHash, _node_id: Option<&NodeID>, _responder_id: Option<&ResponderId>| {
                // Do nothing.
            },
        )
    }
    // Returns a fetch-latest-message callback that reports no message issued.
    fn get_fetch_latest_msg_fn() -> Arc<dyn Fn() -> Option<mc_peers::ConsensusMsg> + Sync + Send> {
        Arc::new(|| None)
    }
    // Spins up a gRPC server on an ephemeral local port around the given
    // service instance and returns a connected client plus the server handle
    // (the server is shut down when the handle is dropped).
    fn get_client_server(instance: PeerApiService) -> (ConsensusPeerApiClient, Server) {
        let service = consensus_peer_grpc::create_consensus_peer_api(instance);
        let env = Arc::new(Environment::new(1));
        let mut server = ServerBuilder::new(env.clone())
            .register_service(service)
            .bind("127.0.0.1", 0)
            .build()
            .unwrap();
        server.start();
        // Port 0 above asks the OS for a free port; read back what was bound.
        let (_, port) = server.bind_addrs().next().unwrap();
        let ch = ChannelBuilder::new(env).connect(&format!("127.0.0.1:{}", port));
        let client = ConsensusPeerApiClient::new(ch);
        (client, server)
    }
    #[test_with_logger]
    // Should ignore a message from an unknown peer.
    fn test_send_consensus_msg_ignore_unknown_peer(logger: Logger) {
        let mut rng: StdRng = SeedableRng::from_seed([67u8; 32]);
        let (consensus_enclave, ledger, tx_manager) = get_mocks();

        // ResponderIds seem to be "host:port" strings.
        let known_responder_ids = vec![
            ResponderId("A:port".to_owned()),
            ResponderId("B:port".to_owned()),
        ];

        let instance = PeerApiService::new(
            Arc::new(consensus_enclave),
            Arc::new(ledger),
            Arc::new(tx_manager),
            get_incoming_consensus_msgs_sender_ok(),
            get_scp_client_value_sender(),
            get_fetch_latest_msg_fn(),
            known_responder_ids.clone(),
            logger,
        );

        let (client, _server) = get_client_server(instance);

        // A message from an unknown peer: "X:port" is not in known_responder_ids.
        let from = ResponderId("X:port".to_owned());
        let node_x_signer_key = Ed25519Pair::from_random(&mut rng);
        let scp_msg = Msg {
            sender_id: NodeID {
                responder_id: from.clone(),
                public_key: node_x_signer_key.public_key(),
            },
            slot_index: 1,
            quorum_set: QuorumSet {
                threshold: 0,
                members: vec![],
            },
            topic: Nominate(NominatePayload {
                X: Default::default(),
                Y: Default::default(),
            }),
        };

        let payload = {
            // Node A's ledger.
            let mut ledger = MockLedger::new();
            ledger
                .expect_get_block()
                .return_const(Ok(Block::new_origin_block(&vec![])));

            mc_peers::ConsensusMsg::from_scp_msg(&ledger, scp_msg, &node_x_signer_key).unwrap()
        };

        let mut message = ConsensusMsg::new();
        message.set_from_responder_id(from.to_string());
        message.set_payload(mc_util_serial::serialize(&payload).unwrap());

        // The RPC itself succeeds; the rejection is signalled in-band.
        match client.send_consensus_msg(&message) {
            Ok(consensus_msg_response) => {
                assert_eq!(
                    consensus_msg_response.get_result(),
                    ConsensusMsgResult::UnknownPeer
                );
            }
            Err(e) => panic!("Unexpected error: {:?}", e),
        }
    }
    #[test_with_logger]
    // Should accept a message from a known peer.
    fn test_send_consensus_msg_ok(logger: Logger) {
        let mut rng: StdRng = SeedableRng::from_seed([77u8; 32]);
        let (consensus_enclave, ledger, tx_manager) = get_mocks();

        // Node A's private message signing keypair.
        let node_a_signer_key = {
            let private_key = Ed25519Private::from_random(&mut rng);
            Ed25519Pair::from(private_key)
        };

        // ResponderIds seem to be "host:port" strings.
        let known_responder_ids = vec![
            ResponderId("A:port".to_owned()),
            ResponderId("B:port".to_owned()),
        ];

        let instance = PeerApiService::new(
            Arc::new(consensus_enclave),
            Arc::new(ledger),
            Arc::new(tx_manager),
            get_incoming_consensus_msgs_sender_ok(),
            get_scp_client_value_sender(),
            get_fetch_latest_msg_fn(),
            known_responder_ids.clone(),
            logger,
        );

        let (client, _server) = get_client_server(instance);

        // A message from a known peer, correctly signed with its own key.
        let from = known_responder_ids[0].clone();
        let scp_msg = Msg {
            sender_id: NodeID {
                responder_id: from.clone(),
                public_key: node_a_signer_key.public_key(),
            },
            slot_index: 1,
            quorum_set: QuorumSet {
                threshold: 0,
                members: vec![],
            },
            topic: Nominate(NominatePayload {
                X: Default::default(),
                Y: Default::default(),
            }),
        };

        let payload = {
            // Node A's ledger.
            let mut ledger = MockLedger::new();
            ledger
                .expect_get_block()
                .return_const(Ok(Block::new_origin_block(&vec![])));

            mc_peers::ConsensusMsg::from_scp_msg(&ledger, scp_msg, &node_a_signer_key).unwrap()
        };

        let mut message = ConsensusMsg::new();
        message.set_from_responder_id(from.to_string());
        message.set_payload(mc_util_serial::serialize(&payload).unwrap());

        match client.send_consensus_msg(&message) {
            Ok(consensus_msg_response) => {
                assert_eq!(consensus_msg_response.get_result(), ConsensusMsgResult::Ok);
            }
            Err(e) => panic!("Unexpected error: {:?}", e),
        }

        // TODO: Should pass the message to incoming_consensus_msgs_sender
    }
    #[test_with_logger]
    // Should return an error if the message cannot be deserialized.
    fn test_send_consensus_msg_deserialize_error(logger: Logger) {
        let (consensus_enclave, ledger, tx_manager) = get_mocks();

        // ResponderIds seem to be "host:port" strings.
        let known_responder_ids = vec![
            ResponderId("A:port".to_owned()),
            ResponderId("B:port".to_owned()),
        ];

        let instance = PeerApiService::new(
            Arc::new(consensus_enclave),
            Arc::new(ledger),
            Arc::new(tx_manager),
            get_incoming_consensus_msgs_sender_ok(),
            get_scp_client_value_sender(),
            get_fetch_latest_msg_fn(),
            known_responder_ids.clone(),
            logger,
        );

        let (client, _server) = get_client_server(instance);

        // A message from a known peer. The payload does not deserialize to a ConsensusMsg,
        // so the service should answer with an RPC-level failure instead of a response.
        let mut message = ConsensusMsg::new();
        let from = known_responder_ids[0].clone();
        message.set_from_responder_id(from.to_string());
        message.set_payload(vec![240, 159, 146, 150]); // UTF-8 "sparkle heart".

        match client.send_consensus_msg(&message) {
            Ok(response) => panic!("Unexpected response: {:?}", response),
            Err(RpcFailure(_rpc_status)) => {
                // This is expected.
                // TODO: check status code.
            }
            Err(e) => panic!("Unexpected error: {:?}", e),
        }
    }
    #[test_with_logger]
    // Should return an error if the message signature is wrong.
    fn test_send_consensus_msg_signature_error(logger: Logger) {
        let mut rng: StdRng = SeedableRng::from_seed([77u8; 32]);
        let (consensus_enclave, ledger, tx_manager) = get_mocks();

        // Node A's private message signing keypair.
        let node_a_signer_key = {
            let private_key = Ed25519Private::from_random(&mut rng);
            Ed25519Pair::from(private_key)
        };

        // ResponderIds seem to be "host:port" strings.
        let known_responder_ids = vec![
            ResponderId("A:port".to_owned()),
            ResponderId("B:port".to_owned()),
        ];

        let instance = PeerApiService::new(
            Arc::new(consensus_enclave),
            Arc::new(ledger),
            Arc::new(tx_manager),
            get_incoming_consensus_msgs_sender_ok(),
            get_scp_client_value_sender(),
            get_fetch_latest_msg_fn(),
            known_responder_ids.clone(),
            logger,
        );

        let (client, _server) = get_client_server(instance);

        // A message from a known peer, but whose SCP message advertises node A's
        // public key while actually being signed by a different key below.
        let from = known_responder_ids[0].clone();
        let scp_msg = Msg {
            sender_id: NodeID {
                responder_id: from.clone(),
                public_key: node_a_signer_key.public_key(),
            },
            slot_index: 1,
            quorum_set: QuorumSet {
                threshold: 0,
                members: vec![],
            },
            topic: Nominate(NominatePayload {
                X: Default::default(),
                Y: Default::default(),
            }),
        };

        let payload = {
            // Sign the message with a different signer key.
            let wrong_signer_key = {
                let private_key = Ed25519Private::from_random(&mut rng);
                Ed25519Pair::from(private_key)
            };

            let mut ledger = MockLedger::new();
            ledger
                .expect_get_block()
                .return_const(Ok(Block::new_origin_block(&vec![])));

            mc_peers::ConsensusMsg::from_scp_msg(&ledger, scp_msg, &wrong_signer_key).unwrap()
        };

        let mut message = ConsensusMsg::new();
        message.set_from_responder_id(from.to_string());
        message.set_payload(mc_util_serial::serialize(&payload).unwrap());

        match client.send_consensus_msg(&message) {
            Ok(response) => panic!("Unexpected response: {:?}", response),
            Err(RpcFailure(_rpc_status)) => {
                // This is expected.
                // TODO: check status code.
            }
            Err(e) => panic!("Unexpected error: {:?}", e),
        }
    }
// TODO: fetch_latest_msg
// TODO: fetch_txs
// TODO: peer_tx_propose
}
| 36.797814 | 99 | 0.569312 |
f9378cffa566fdcf2dc0654c4318d6b572f3b65e | 1,242 | // Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate rand;
extern crate regex;
// Builds a byte-oriented `Regex` from a pattern, returning the fallible
// constructor result (not yet unwrapped).
macro_rules! regex_new {
    ($re:expr) => {{
        use regex::bytes::Regex;
        Regex::new($re)
    }}
}
// Builds a byte-oriented `RegexSet` from patterns, returning the fallible
// constructor result (not yet unwrapped).
macro_rules! regex_set_new {
    ($res:expr) => {{
        use regex::bytes::RegexSet;
        RegexSet::new($res)
    }}
}
// Builds a byte-oriented `Regex`, panicking on an invalid pattern
// (tests only use known-good patterns).
macro_rules! regex {
    ($re:expr) => {
        regex_new!($re).unwrap()
    }
}
// Builds a byte-oriented `RegexSet`, panicking on invalid patterns.
macro_rules! regex_set {
    ($res:expr) => {
        regex_set_new!($res).unwrap()
    }
}
// Must come before other module definitions: these files define macros the
// test modules below rely on.
include!("macros_bytes.rs");
include!("macros.rs");
mod api;
mod bytes;
mod crazy;
mod flags;
mod fowler;
mod multiline;
mod noparse;
mod regression;
mod replace;
mod set;
mod shortest_match;
mod suffix_reverse;
mod unicode;
mod word_boundary;
mod word_boundary_ascii;
| 21.050847 | 69 | 0.672303 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.