Columns:
  file_name  string (length 3-137)
  prefix     string (length 0-918k)
  suffix     string (length 0-962k)
  middle     string (length 0-812k)
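The column names suggest a fill-in-the-middle style split of each file: prefix is the text before a held-out span, middle is the span itself, and suffix is the remainder, so concatenating the three in order should recover the original file. Below is a minimal Rust sketch of that relationship, assuming a hypothetical Row struct that mirrors the columns; the struct, function, and sample values are illustrative only and not part of the dataset.

// Hypothetical row type mirroring the four columns above (illustration only).
struct Row {
    file_name: String,
    prefix: String,
    middle: String,
    suffix: String,
}

impl Row {
    // Assuming prefix/middle/suffix are a straight three-way split of the
    // file's text, concatenating them in order recovers the full contents.
    fn reassemble(&self) -> String {
        let mut out = String::with_capacity(
            self.prefix.len() + self.middle.len() + self.suffix.len(),
        );
        out.push_str(&self.prefix);
        out.push_str(&self.middle);
        out.push_str(&self.suffix);
        out
    }
}

fn main() {
    // Sample values: an arbitrary split of the .prettierrc.js line shown
    // later in this dump, chosen only to demonstrate reassembly.
    let row = Row {
        file_name: ".prettierrc.js".to_string(),
        prefix: "module.exports = require('".to_string(),
        middle: "@spraoi/prettier-config".to_string(),
        suffix: "');".to_string(),
    };
    println!("{} reassembles to: {}", row.file_name, row.reassemble());
}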
image_repository.go
// Code generated by MockGen. DO NOT EDIT.
// Source: ./image_repository.go

// Package repositorymock is a generated GoMock package.
package repositorymock

import (
    context "context"
    domain "homeapi/domain"
    reflect "reflect"

    gomock "github.com/golang/mock/gomock"
)

// MockImageRepository is a mock of ImageRepository interface.
type MockImageRepository struct {
    ctrl     *gomock.Controller
    recorder *MockImageRepositoryMockRecorder
}

// MockImageRepositoryMockRecorder is the mock recorder for MockImageRepository.
type MockImageRepositoryMockRecorder struct {
    mock *MockImageRepository
}

// NewMockImageRepository creates a new mock instance.
func NewMockImageRepository(ctrl *gomock.Controller) *MockImageRepository {
    mock := &MockImageRepository{ctrl: ctrl}
    mock.recorder = &MockImageRepositoryMockRecorder{mock}
    return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockImageRepository) EXPECT() *MockImageRepositoryMockRecorder {
    return m.recorder
}

// Insert mocks base method.
func (m *MockImageRepository) Insert(ctx context.Context, image *domain.Image) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "Insert", ctx, image)
    ret0, _ := ret[0].(error)
    return ret0
}

// Insert indicates an expected call of Insert.
func (mr *MockImageRepositoryMockRecorder) Insert(ctx, image interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Insert", reflect.TypeOf((*MockImageRepository)(nil).Insert), ctx, image)
}
jquery-selective.min.js
/**
 * Admui-iframe v2.0.0 (http://www.admui.com/)
 * Copyright 2015-2018 Admui Team
 * Licensed under the Admui License 1.1 (http://www.admui.com/about/license)
 */
!function(e,t,n){"use strict";jQuery.components.register("selective",{mode:"default",defaults:{search:function(){return'<input class="'+this.namespace+'-search" type="text" placeholder="搜索…">'},triggerButton:function(){return'<div class="'+this.namespace+'-trigger-button">添加</div>'}}})}(window,document);
.prettierrc.js
module.exports = require('@spraoi/prettier-config');
operations.rs
#![doc = "generated by AutoRust"] #![allow(unused_mut)] #![allow(unused_variables)] #![allow(unused_imports)] use super::models; #[derive(Clone)] pub struct Client { endpoint: String, credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>, scopes: Vec<String>, pipeline: azure_core::Pipeline, } #[derive(Clone)] pub struct ClientBuilder { credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>, endpoint: Option<String>, scopes: Option<Vec<String>>, } pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD; impl ClientBuilder { pub fn new(credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>) -> Self { Self { credential, endpoint: None, scopes: None, } } pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self { self.endpoint = Some(endpoint.into()); self } pub fn scopes(mut self, scopes: &[&str]) -> Self { self.scopes = Some(scopes.iter().map(|scope| (*scope).to_owned()).collect()); self } pub fn build(self) -> Client { let endpoint = self.endpoint.unwrap_or_else(|| DEFAULT_ENDPOINT.to_owned()); let scopes = self.scopes.unwrap_or_else(|| vec![format!("{}/", endpoint)]); Client::new(endpoint, self.credential, scopes) } } impl Client { pub(crate) fn endpoint(&self) -> &str { self.endpoint.as_str() } pub(crate) fn token_credential(&self) -> &dyn azure_core::auth::TokenCredential { self.credential.as_ref() } pub(crate) fn scopes(&self) -> Vec<&str> { self.scopes.iter().map(String::as_str).collect() } pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> Result<azure_core::Response, azure_core::Error> { let mut context = azure_core::Context::default(); let mut request = request.into(); self.pipeline.send(&mut context, &mut request).await } pub fn new( endpoint: impl Into<String>, credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>, scopes: Vec<String>, ) -> Self { let endpoint = endpoint.into(); let pipeline = azure_core::Pipeline::new( option_env!("CARGO_PKG_NAME"), option_env!("CARGO_PKG_VERSION"), azure_core::ClientOptions::default(), Vec::new(), Vec::new(), ); Self { endpoint, credential, scopes, pipeline, } } pub fn operations(&self) -> operations::Client { operations::Client(self.clone()) } pub fn private_link_resources(&self) -> private_link_resources::Client { private_link_resources::Client(self.clone()) } pub fn recovery_services(&self) -> recovery_services::Client { recovery_services::Client(self.clone()) } pub fn registered_identities(&self) -> registered_identities::Client { registered_identities::Client(self.clone()) } pub fn replication_usages(&self) -> replication_usages::Client { replication_usages::Client(self.clone()) } pub fn usages(&self) -> usages::Client { usages::Client(self.clone()) } pub fn vault_certificates(&self) -> vault_certificates::Client { vault_certificates::Client(self.clone()) } pub fn vault_extended_info(&self) -> vault_extended_info::Client { vault_extended_info::Client(self.clone()) } pub fn vaults(&self) -> vaults::Client { vaults::Client(self.clone()) } } #[non_exhaustive] #[derive(Debug, thiserror :: Error)] #[allow(non_camel_case_types)] pub enum Error { #[error(transparent)] VaultCertificates_Create(#[from] vault_certificates::create::Error), #[error(transparent)] RegisteredIdentities_Delete(#[from] registered_identities::delete::Error), #[error(transparent)] ReplicationUsages_List(#[from] replication_usages::list::Error), #[error(transparent)] PrivateLinkResources_List(#[from] private_link_resources::list::Error), #[error(transparent)] 
PrivateLinkResources_Get(#[from] private_link_resources::get::Error), #[error(transparent)] RecoveryServices_CheckNameAvailability(#[from] recovery_services::check_name_availability::Error), #[error(transparent)] Vaults_ListBySubscriptionId(#[from] vaults::list_by_subscription_id::Error), #[error(transparent)] Operations_List(#[from] operations::list::Error), #[error(transparent)] Vaults_ListByResourceGroup(#[from] vaults::list_by_resource_group::Error), #[error(transparent)] Vaults_Get(#[from] vaults::get::Error), #[error(transparent)] Vaults_CreateOrUpdate(#[from] vaults::create_or_update::Error), #[error(transparent)] Vaults_Update(#[from] vaults::update::Error), #[error(transparent)] Vaults_Delete(#[from] vaults::delete::Error), #[error(transparent)] VaultExtendedInfo_Get(#[from] vault_extended_info::get::Error), #[error(transparent)] VaultExtendedInfo_CreateOrUpdate(#[from] vault_extended_info::create_or_update::Error), #[error(transparent)] VaultExtendedInfo_Update(#[from] vault_extended_info::update::Error), #[error(transparent)] GetOperationStatus(#[from] get_operation_status::Error), #[error(transparent)] GetOperationResult(#[from] get_operation_result::Error), #[error(transparent)] Usages_ListByVaults(#[from] usages::list_by_vaults::Error), } pub mod vault_certificates { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn create( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, vault_name: impl Into<String>, certificate_name: impl Into<String>, certificate_request: impl Into<models::CertificateRequest>, ) -> create::Builder { create::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), certificate_name: certificate_name.into(), certificate_request: certificate_request.into(), } } } pub mod create { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) certificate_name: String, pub(crate) certificate_request: models::CertificateRequest, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VaultCertificateResponse, Error>> { Box::pin(async move { let url_str = &format!( "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/certificates/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name, &self.certificate_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = 
req_builder.method(http::Method::PUT); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-03-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.certificate_request).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::VaultCertificateResponse = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; Err(Error::UnexpectedResponse { status_code, body: rsp_body, }) } } }) } } } } pub mod registered_identities { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn delete( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, vault_name: impl Into<String>, identity_name: impl Into<String>, ) -> delete::Builder { delete::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), identity_name: identity_name.into(), } } } pub mod delete { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) identity_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> { Box::pin(async move { let url_str = &format!( "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/registeredIdentities/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name, &self.identity_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; 
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-03-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::NO_CONTENT => Ok(()), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; Err(Error::UnexpectedResponse { status_code, body: rsp_body, }) } } }) } } } } pub mod replication_usages { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn list( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, vault_name: impl Into<String>, ) -> list::Builder { list::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), } } } pub mod list { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) vault_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ReplicationUsageList, Error>> { Box::pin(async move { let url_str = &format!( "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/replicationUsages", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-03-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::ReplicationUsageList = 
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; Err(Error::UnexpectedResponse { status_code, body: rsp_body, }) } } }) } } } } pub mod private_link_resources { use super::models; pub struct Client(pub(crate) super::Client); impl Client { #[doc = "Returns the list of private link resources that need to be created for Backup and SiteRecovery"] pub fn list( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, vault_name: impl Into<String>, ) -> list::Builder { list::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), } } #[doc = "Returns a specified private link resource that need to be created for Backup and SiteRecovery"] pub fn get( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, vault_name: impl Into<String>, private_link_resource_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), private_link_resource_name: private_link_resource_name.into(), } } } pub mod list { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) vault_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::PrivateLinkResources, Error>> {
let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/privateLinkResources", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-03-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::PrivateLinkResources = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod get { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) private_link_resource_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::PrivateLinkResource, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/privateLinkResources/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name, &self.private_link_resource_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", 
token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-03-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::PrivateLinkResource = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod recovery_services { use super::models; pub struct Client(pub(crate) super::Client); impl Client { #[doc = "API to check for resource name availability.\r\nA name is available if no other resource exists that has the same SubscriptionId, Resource Name and Type\r\nor if one or more such resources exist, each of these must be GC'd and their time of deletion be more than 24 Hours Ago"] pub fn check_name_availability( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, location: impl Into<String>, input: impl Into<models::CheckNameAvailabilityParameters>, ) -> check_name_availability::Builder { check_name_availability::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), location: location.into(), input: input.into(), } } } pub mod check_name_availability { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) location: String, pub(crate) input: models::CheckNameAvailabilityParameters, } impl Builder { pub fn into_future( self, ) -> futures::future::BoxFuture<'static, std::result::Result<models::CheckNameAvailabilityResult, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/locations/{}/checkNameAvailability", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.location ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); let credential = self.client.token_credential(); let 
token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-03-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.input).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CheckNameAvailabilityResult = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod vaults { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn list_by_subscription_id(&self, subscription_id: impl Into<String>) -> list_by_subscription_id::Builder { list_by_subscription_id::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), } } pub fn list_by_resource_group( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> list_by_resource_group::Builder { list_by_resource_group::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn get( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, vault_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), } } pub fn create_or_update( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, vault_name: impl Into<String>, vault: impl Into<models::Vault>, ) -> create_or_update::Builder { create_or_update::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), vault: vault.into(), } } pub fn update( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, vault_name: impl Into<String>, vault: impl Into<models::PatchVault>, ) -> update::Builder { update::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), vault: vault.into(), } } pub fn delete( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, vault_name: impl Into<String>, ) -> delete::Builder { delete::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), } } } pub mod list_by_subscription_id { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: 
models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VaultList, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.RecoveryServices/vaults", self.client.endpoint(), &self.subscription_id ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-03-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::VaultList = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod list_by_resource_group { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VaultList, Error>> 
{ Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults", self.client.endpoint(), &self.subscription_id, &self.resource_group_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-03-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::VaultList = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod get { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) vault_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Vault, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-03-01"); let req_body = azure_core::EMPTY_BODY; req_builder = 
req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Vault = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod create_or_update { use super::models; #[derive(Debug)] pub enum Response { Ok200(models::Vault), Created201(models::Vault), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) vault: models::Vault, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-03-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.vault).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Vault = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } 
http::StatusCode::CREATED => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Vault = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Created201(rsp_value)) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod update { use super::models; #[derive(Debug)] pub enum Response { Ok200(models::Vault), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) vault: models::PatchVault, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-03-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.vault).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Vault = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| 
Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod delete { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) vault_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-03-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(()), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod operations { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn list(&self) -> list::Builder { list::Builder { client: self.0.clone() } } } pub mod list { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize 
response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ClientDiscoveryResponse, Error>> { Box::pin(async move { let url_str = &format!("{}/providers/Microsoft.RecoveryServices/operations", self.client.endpoint(),); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-03-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::ClientDiscoveryResponse = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod vault_extended_info { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn get( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, vault_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), } } pub fn create_or_update( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, vault_name: impl Into<String>, resource_resource_extended_info_details: impl Into<models::VaultExtendedInfoResource>, ) -> create_or_update::Builder { create_or_update::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), resource_resource_extended_info_details: resource_resource_extended_info_details.into(), } } pub fn update( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, vault_name: impl Into<String>, resource_resource_extended_info_details: impl Into<models::VaultExtendedInfoResource>, ) -> update::Builder { update::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), resource_resource_extended_info_details: resource_resource_extended_info_details.into(), } } } pub mod get { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] 
ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) vault_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VaultExtendedInfoResource, Error>> { Box::pin(async move { let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/extendedInformation/vaultExtendedInfo" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vault_name) ; let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-03-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::VaultExtendedInfoResource = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod create_or_update { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, 
pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) resource_resource_extended_info_details: models::VaultExtendedInfoResource, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VaultExtendedInfoResource, Error>> { Box::pin(async move { let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/extendedInformation/vaultExtendedInfo" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vault_name) ; let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-03-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.resource_resource_extended_info_details).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::VaultExtendedInfoResource = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod update { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) resource_resource_extended_info_details: models::VaultExtendedInfoResource, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VaultExtendedInfoResource, Error>> { Box::pin(async move { let url_str = & format ! 
("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/extendedInformation/vaultExtendedInfo" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vault_name) ; let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-03-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.resource_resource_extended_info_details).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::VaultExtendedInfoResource = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } impl Client { pub fn get_operation_status( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, vault_name: impl Into<String>, operation_id: impl Into<String>, ) -> get_operation_status::Builder { get_operation_status::Builder { client: self.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), operation_id: operation_id.into(), } } pub fn get_operation_result( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, vault_name: impl Into<String>, operation_id: impl Into<String>, ) -> get_operation_result::Builder { get_operation_result::Builder { client: self.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), operation_id: operation_id.into(), } } } pub mod get_operation_status { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { 
pub(crate) client: super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) operation_id: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OperationResource, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/operationStatus/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name, &self.operation_id ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-03-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::OperationResource = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod get_operation_result { use super::models; #[derive(Debug)] pub enum Response { Ok200(models::Vault), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) operation_id: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/operationResults/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name, &self.operation_id ); let mut url = 
url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-03-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Vault = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod usages { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn list_by_vaults( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, vault_name: impl Into<String>, ) -> list_by_vaults::Builder { list_by_vaults::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), } } } pub mod list_by_vaults { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) vault_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VaultUsageList, Error>> { Box::pin(async move { let url_str = &format!( "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/usages", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential 
.get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-03-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::VaultUsageList = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; Err(Error::UnexpectedResponse { status_code, body: rsp_body, }) } } }) } } } }
Box::pin(async move {
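Usage note (not part of the generated operations.rs source): the generated client is driven through its builder types, and a brief sketch may help orient readers. Everything below is an assumption-laden illustration — it presumes ClientBuilder from this crate is in scope, that vault_extended_info() exposes a get(subscription_id, resource_group_name, vault_name) constructor matching the GET builder shown above, and that the subscription id, resource group, and vault name are placeholders.

// Hypothetical usage sketch; identifiers below are placeholders, not values from this file.
use std::sync::Arc;
use azure_core::auth::TokenCredential;

async fn show_vault_extended_info(credential: Arc<dyn TokenCredential>) -> Result<(), Box<dyn std::error::Error>> {
    // Build the client against the default public-cloud endpoint.
    let client = ClientBuilder::new(credential).build();
    // Drive the generated GET builder for .../extendedInformation/vaultExtendedInfo.
    let _info = client
        .vault_extended_info()
        .get(
            "00000000-0000-0000-0000-000000000000", // subscription id (placeholder)
            "example-rg",                           // resource group (placeholder)
            "example-vault",                        // vault name (placeholder)
        )
        .into_future()
        .await?;
    Ok(())
}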
apps.py
from django.apps import AppConfig class
(AppConfig): default_auto_field = 'django.db.models.BigAutoField' name = 'course_application'
CourseApplicationConfig
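Usage note (not part of the apps.py record): once prefix, middle, and suffix are joined, the file defines a standard Django AppConfig. A short, assumed illustration of how such a config is typically referenced from project settings follows; the settings excerpt and the other listed apps are hypothetical.

# Hypothetical settings.py excerpt; only the last entry relates to the app above.
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    # Either the app label "course_application" or the full AppConfig path can be listed:
    "course_application.apps.CourseApplicationConfig",
]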
table_row_service_test.go
package service_test import ( "naksha/logger" "naksha/service" "net/http" "net/url" "os" "testing" "time" ) func TestMain(m *testing.M) { logs_dir := "../../../test" logger.InitLoggers(logs_dir) code := m.Run() os.Exit(code) } // Test names are prefixed with TestRowTable instead of TestTableRow as is the // convention of naming tests of a service based on the name of the service. // As per the convention, tests of service table_row_service should have prefix // TestTableRow but these tests have prefix TestRowTable. This is because, tests // for the table_service.go start with TestTable; this makes it difficult to run // tests only of table_service with "-run TestTable" because that prefix will // match TestTableRow too. Hence, TestRowTable instead of TestTableRow which will // allow running the tests of table_service and table_row_service with the "-run" // switch. func TestRowTableData(t *testing.T) { map_dao := &TableRowDataImpl{&DaoImpl{}} service := service.MakeTableRowService(map_dao) uri_params := map[string]string{"table_id": "1", "page": "1"} values := url.Values{} values.Set("order_column", "name") values.Set("order_type", "asc") request := makeRequest(http.MethodPost, uri_params, values, true) result := service.Data(request) if !result.IsSuccess() { t.Errorf("Error returned instead of success") } tmp1, ok := result.GetDataByKey("rows") if !ok { t.Errorf("rows is not present") } _, ok = result.GetDataByKey("count") if !ok { t.Errorf("count is not present") } rows := tmp1.([]map[string]string) _, ok = rows[0]["the_geom_webmercator"] if ok { t.Errorf("the_geom_webmercator should not be included in the data") } } func TestRowTableUpdateInvalidForm(t *testing.T) { map_dao := &DaoImpl{} service := service.MakeTableRowService(map_dao) uri_params := map[string]string{"table_id": "124"} values := url.Values{} request := makeRequest(http.MethodPost, uri_params, values, true) result := service.Update(request) if result.IsSuccess() { t.Errorf("Form validation failed") } } func TestRowTableUpdateNonGeom(t *testing.T) { map_dao := &TableRowUpdateNonGeomImpl{&DaoImpl{}, false} service := service.MakeTableRowService(map_dao) uri_params := map[string]string{"id": "1", "table_id": "1"} values := url.Values{} values.Set("column", "name") values.Set("value", "Some Name") request := makeRequest(http.MethodPost, uri_params, values, true) result := service.Update(request) if !result.IsSuccess() { t.Errorf("Update non geom field failed. %s", result.GetErrors()) } _, ok := result.GetDataByKey("update_hash") if ok { t.Errorf("update_hash should not be present for non geom field") } if !map_dao.IsUpdateCalled() { t.Errorf("Update has not been called on dao class") } } func TestRowTableUpdateGeomUpdateStyle(t *testing.T) { map_dao := &TableRowUpdateGeomImpl{&DaoImpl{}, "unknown", false, false, false} service := service.MakeTableRowService(map_dao) uri_params := map[string]string{"id": "1", "table_id": "1"} values := url.Values{} values.Set("column", "the_geom") values.Set("value", "SRID=4326;POINT(78.4622620046139 17.411652235651)") request := makeRequest(http.MethodPost, uri_params, values, true) result := service.Update(request) // wait for a second to make sure that trs.updateGeometryTypeStyle is called time.Sleep(1 * time.Millisecond) if !result.IsSuccess() { t.Errorf("Update geom field failed. 
%s", result.GetErrors()) } _, ok := result.GetDataByKey("update_hash") if !ok { t.Errorf("update_hash should be present for the_geom field") } if !map_dao.IsUpdateGeometryCalled() { t.Errorf("UpdateGeometry has not been called on dao class") } if !map_dao.IsFindWhereCalled() { t.Errorf("Geometry Type in layer table has not been queried for") } if !map_dao.IsFetchDataCalled() { t.Errorf("Geometry type has not been queried for on the table") } } func TestRowTableUpdateGeomNoStyle(t *testing.T) { map_dao := &TableRowUpdateGeomImpl{&DaoImpl{}, "polygon", false, false, false} service := service.MakeTableRowService(map_dao) uri_params := map[string]string{"id": "1", "table_id": "1"} values := url.Values{} values.Set("column", "the_geom") values.Set("value", "SRID=4326;POINT(78.4622620046139 17.411652235651)") request := makeRequest(http.MethodPost, uri_params, values, true) result := service.Update(request) // wait for a second to make sure that trs.updateGeometryTypeStyle is called time.Sleep(1 * time.Millisecond) if !result.IsSuccess() { t.Errorf("Update geom field failed. %s", result.GetErrors()) } _, ok := result.GetDataByKey("update_hash") if !ok { t.Errorf("update_hash should be present for the_geom field") } if !map_dao.IsUpdateGeometryCalled() { t.Errorf("UpdateGeometry has not been called on dao class") } if !map_dao.IsFindWhereCalled() { t.Errorf("FindWhere has not been called on dao class") } if map_dao.IsFetchDataCalled() { t.Errorf("Style has been updated when it shouldn't be") } } func TestRowTableAddInvalidForm(t *testing.T) { map_dao := &DaoImpl{} service := service.MakeTableRowService(map_dao) uri_params := map[string]string{"table_id": "42"} values := url.Values{} values.Set("with_geometry", "1") request := makeRequest(http.MethodPost, uri_params, values, true) result := service.Add(request) if result.IsSuccess() { t.Errorf("Input validation failed") } } func TestRowTableAddWithoutGeometry(t *testing.T) { map_dao := &TableRowAddNonGeomImpl{&DaoImpl{}, false} service := service.MakeTableRowService(map_dao) uri_params := map[string]string{"table_id": "42"}
request := makeRequest(http.MethodPost, uri_params, values, true) result := service.Add(request) if !result.IsSuccess() { t.Errorf("Error returned") } _, ok := result.GetDataByKey("row") if !ok { t.Errorf("row should be present in data") } if !map_dao.IsInsertCalled() { t.Errorf("Insert has not been called") } } func TestRowTableAddGeometryNoStyle(t *testing.T) { map_dao := &TableRowAddGeomImpl{&DaoImpl{}, "polygon", false, false, false} service := service.MakeTableRowService(map_dao) uri_params := map[string]string{"table_id": "42"} values := url.Values{} values.Set("with_geometry", "1") values.Set("geometry", "SRID=4326;POINT(78.4622620046139 17.411652235651)") request := makeRequest(http.MethodPost, uri_params, values, true) result := service.Add(request) // wait for a second to make sure that trs.updateGeometryTypeStyle is called time.Sleep(100 * time.Millisecond) if !result.IsSuccess() { t.Errorf("Error returned") } _, ok := result.GetDataByKey("row") if !ok { t.Errorf("row should be present in data") } if !map_dao.insert_geometry_called { t.Errorf("InsertWithGeometry not called") } if !map_dao.find_where_called { t.Errorf("FindWhere not called") } if map_dao.fetch_data_called { t.Errorf("Geometry type queried for even when geometry type is set in mstr_layer") } } func TestRowTableAddGeometryUpdateStyle(t *testing.T) { map_dao := &TableRowAddGeomImpl{&DaoImpl{}, "unknown", false, false, false} service := service.MakeTableRowService(map_dao) uri_params := map[string]string{"table_id": "42"} values := url.Values{} values.Set("with_geometry", "1") values.Set("geometry", "SRID=4326;POINT(78.4622620046139 17.411652235651)") request := makeRequest(http.MethodPost, uri_params, values, true) result := service.Add(request) // wait for a second to make sure that trs.updateGeometryTypeStyle is called time.Sleep(100 * time.Millisecond) if !result.IsSuccess() { t.Errorf("Error returned") } _, ok := result.GetDataByKey("row") if !ok { t.Errorf("row should be present in data") } if !map_dao.insert_geometry_called { t.Errorf("InsertWithGeometry not called") } if !map_dao.find_where_called { t.Errorf("mstr_layer not queried for geometry_type") } if !map_dao.fetch_data_called { t.Errorf("Geometry type not queried for when geometry type is 'unknown'in mstr_layer") } } func TestRowTableDelete(t *testing.T) { map_dao := &TableRowDeleteImpl{&DaoImpl{}} service := service.MakeTableRowService(map_dao) result := service.Delete("323", "12") if !result.IsSuccess() { t.Errorf("Delete failed") } _, ok := result.GetDataByKey("update_hash") if !ok { t.Errorf("update_hash not present in data") } } func TestRowTableShow(t *testing.T) { map_dao := &TableRowShowImpl{&DaoImpl{}} service := service.MakeTableRowService(map_dao) result := service.Show("523", "92") if !result.IsSuccess() { t.Errorf("Could not get details") } _, ok := result.GetDataByKey("data") if !ok { t.Errorf("data is not present in result") } } func TestRowTableTableBelongsToUserFalse(t *testing.T) { map_dao := &DaoImpl{} service := service.MakeTableRowService(map_dao) belongs := service.TableBelongsToUser("143", 3) if belongs { t.Errorf("TableBelongsToUser: unauthorized user given access") } } func TestRowTableTableBelongsToUserTrue(t *testing.T) { map_dao := &TableRowBelongsImpl{&DaoImpl{}} service := service.MakeTableRowService(map_dao) belongs := service.TableBelongsToUser("1432", 2) if !belongs { t.Errorf("TableBelongsToUser: authorized user denied access") } }
values := url.Values{} values.Set("with_geometry", "0") values.Set("name", "Some Name")
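Usage note (not part of the test record): the tests above rely on hand-rolled test doubles (DaoImpl plus per-test wrappers such as TableRowAddGeomImpl) that embed a base implementation and record which methods were called via boolean flags. Below is a minimal sketch of that embedding pattern with hypothetical type names; the real doubles and the makeRequest helper are defined elsewhere in the test package.

// Hypothetical sketch of the embedding-based test-double pattern used above.
// BaseDao and FakeRowDao are placeholder names, not the package's real types.
package service_test

type BaseDao struct{}

func (d *BaseDao) Insert(table string, row map[string]string) error { return nil }

func (d *BaseDao) FindWhere(table, where string) (map[string]string, error) {
	return map[string]string{}, nil
}

// FakeRowDao embeds BaseDao so it satisfies the full DAO interface while
// overriding only the calls a given test cares about, recording them in flags.
type FakeRowDao struct {
	*BaseDao
	insert_called bool
}

func (d *FakeRowDao) Insert(table string, row map[string]string) error {
	d.insert_called = true
	return nil
}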
apps_v1beta1_api.py
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.12.4 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..api_client import ApiClient class AppsV1beta1Api(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_namespaced_controller_revision(self, namespace, body, **kwargs): """ create a ControllerRevision This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_controller_revision(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1beta1ControllerRevision body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1ControllerRevision If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_controller_revision_with_http_info(namespace, body, **kwargs) else: (data) = self.create_namespaced_controller_revision_with_http_info(namespace, body, **kwargs) return data def create_namespaced_controller_revision_with_http_info(self, namespace, body, **kwargs): """ create a ControllerRevision This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_controller_revision_with_http_info(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1beta1ControllerRevision body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1ControllerRevision If the method is called asynchronously, returns the request thread. 
""" all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_namespaced_controller_revision" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_controller_revision`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_namespaced_controller_revision`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1beta1ControllerRevision', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def create_namespaced_deployment(self, namespace, body, **kwargs): """ create a Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_deployment(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param AppsV1beta1Deployment body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: AppsV1beta1Deployment If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_deployment_with_http_info(namespace, body, **kwargs) else: (data) = self.create_namespaced_deployment_with_http_info(namespace, body, **kwargs) return data def create_namespaced_deployment_with_http_info(self, namespace, body, **kwargs): """ create a Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_deployment_with_http_info(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param AppsV1beta1Deployment body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: AppsV1beta1Deployment If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_namespaced_deployment" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_deployment`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_namespaced_deployment`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AppsV1beta1Deployment', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), 
collection_formats=collection_formats) def create_namespaced_deployment_rollback(self, name, namespace, body, **kwargs): """ create rollback of a Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_deployment_rollback(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the DeploymentRollback (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param AppsV1beta1DeploymentRollback body: (required) :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param bool include_uninitialized: If IncludeUninitialized is specified, the object may be returned without completing initialization. :param str pretty: If 'true', then the output is pretty printed. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_deployment_rollback_with_http_info(name, namespace, body, **kwargs) else: (data) = self.create_namespaced_deployment_rollback_with_http_info(name, namespace, body, **kwargs) return data def create_namespaced_deployment_rollback_with_http_info(self, name, namespace, body, **kwargs): """ create rollback of a Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_deployment_rollback_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the DeploymentRollback (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param AppsV1beta1DeploymentRollback body: (required) :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param bool include_uninitialized: If IncludeUninitialized is specified, the object may be returned without completing initialization. :param str pretty: If 'true', then the output is pretty printed. :return: V1Status If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'dry_run', 'include_uninitialized', 'pretty'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_namespaced_deployment_rollback" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `create_namespaced_deployment_rollback`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_deployment_rollback`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_namespaced_deployment_rollback`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/rollback', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def create_namespaced_stateful_set(self, namespace, body, **kwargs): """ create a StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_stateful_set(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1beta1StatefulSet body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed :return: V1beta1StatefulSet If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_stateful_set_with_http_info(namespace, body, **kwargs) else: (data) = self.create_namespaced_stateful_set_with_http_info(namespace, body, **kwargs) return data def create_namespaced_stateful_set_with_http_info(self, namespace, body, **kwargs): """ create a StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_stateful_set_with_http_info(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1beta1StatefulSet body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1StatefulSet If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_namespaced_stateful_set" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_stateful_set`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_namespaced_stateful_set`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1beta1StatefulSet', auth_settings=auth_settings, async_req=params.get('async_req'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_collection_namespaced_controller_revision(self, namespace, **kwargs): """ delete collection of ControllerRevision This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_controller_revision(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_namespaced_controller_revision_with_http_info(namespace, **kwargs) else: (data) = self.delete_collection_namespaced_controller_revision_with_http_info(namespace, **kwargs) return data def delete_collection_namespaced_controller_revision_with_http_info(self, namespace, **kwargs): """ delete collection of ControllerRevision This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_controller_revision_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. 
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. 
""" all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_collection_namespaced_controller_revision" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_controller_revision`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_collection_namespaced_deployment(self, namespace, **kwargs): """ delete collection of Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_deployment(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. 
:return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_namespaced_deployment_with_http_info(namespace, **kwargs) else: (data) = self.delete_collection_namespaced_deployment_with_http_info(namespace, **kwargs) return data def delete_collection_namespaced_deployment_with_http_info(self, namespace, **kwargs): """ delete collection of Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_deployment_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_collection_namespaced_deployment" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_deployment`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, 
response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_collection_namespaced_stateful_set(self, namespace, **kwargs): """ delete collection of StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_stateful_set(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_namespaced_stateful_set_with_http_info(namespace, **kwargs) else: (data) = self.delete_collection_namespaced_stateful_set_with_http_info(namespace, **kwargs) return data def delete_collection_namespaced_stateful_set_with_http_info(self, namespace, **kwargs): """ delete collection of StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_stateful_set_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. 
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_collection_namespaced_stateful_set" % key ) params[key] = val del params['kwargs']
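        # At this point every supplied keyword argument has been checked against
        # all_params; the remaining entries in `params` are used below to build
        # the path, query, header and body of the HTTP request.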
# verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_stateful_set`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_namespaced_controller_revision(self, name, namespace, body, **kwargs): """ delete a ControllerRevision This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_controller_revision(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ControllerRevision (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1DeleteOptions body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. 
If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs) else: (data) = self.delete_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs) return data def delete_namespaced_controller_revision_with_http_info(self, name, namespace, body, **kwargs): """ delete a ControllerRevision This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_controller_revision_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ControllerRevision (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1DeleteOptions body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. 
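        Illustrative usage (an editorial sketch; `api` is assumed to be an instance of this API class and the object names are placeholders). A foreground cascading delete can be requested through the delete options body; the convenience wrapper delete_namespaced_controller_revision accepts the same arguments.
        >>> body = kubernetes.client.V1DeleteOptions(propagation_policy='Foreground')
        >>> status = api.delete_namespaced_controller_revision(
        ...     'my-revision', 'default', body, grace_period_seconds=0)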
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_namespaced_controller_revision" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_controller_revision`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_controller_revision`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `delete_namespaced_controller_revision`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) if 'grace_period_seconds' in params: query_params.append(('gracePeriodSeconds', params['grace_period_seconds'])) if 'orphan_dependents' in params: query_params.append(('orphanDependents', params['orphan_dependents'])) if 'propagation_policy' in params: query_params.append(('propagationPolicy', params['propagation_policy'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions/{name}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_namespaced_deployment(self, name, namespace, body, **kwargs): """ delete a Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_deployment(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Deployment (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1DeleteOptions body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. 
An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_namespaced_deployment_with_http_info(name, namespace, body, **kwargs) else: (data) = self.delete_namespaced_deployment_with_http_info(name, namespace, body, **kwargs) return data def delete_namespaced_deployment_with_http_info(self, name, namespace, body, **kwargs): """ delete a Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_deployment_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Deployment (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1DeleteOptions body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_namespaced_deployment" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_deployment`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_deployment`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `delete_namespaced_deployment`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) if 'grace_period_seconds' in params: query_params.append(('gracePeriodSeconds', params['grace_period_seconds'])) if 'orphan_dependents' in params: query_params.append(('orphanDependents', params['orphan_dependents'])) if 'propagation_policy' in params: query_params.append(('propagationPolicy', params['propagation_policy'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_namespaced_stateful_set(self, name, namespace, body, **kwargs): """ delete a StatefulSet This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_stateful_set(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StatefulSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1DeleteOptions body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs) else: (data) = self.delete_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs) return data def delete_namespaced_stateful_set_with_http_info(self, name, namespace, body, **kwargs): """ delete a StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_stateful_set_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StatefulSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1DeleteOptions body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. 
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_namespaced_stateful_set" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_stateful_set`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_stateful_set`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `delete_namespaced_stateful_set`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) if 'grace_period_seconds' in params: query_params.append(('gracePeriodSeconds', params['grace_period_seconds'])) if 'orphan_dependents' in params: query_params.append(('orphanDependents', params['orphan_dependents'])) if 'propagation_policy' in params: query_params.append(('propagationPolicy', params['propagation_policy'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_api_resources(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_api_resources_with_http_info(**kwargs) else: (data) = self.get_api_resources_with_http_info(**kwargs) return data def get_api_resources_with_http_info(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_api_resources" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1APIResourceList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_controller_revision_for_all_namespaces(self, **kwargs): """ list or watch objects of kind ControllerRevision This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_controller_revision_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. 
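        Illustrative usage (an editorial sketch; assumes the surrounding kubernetes client package with its `watch` helper and `api` bound to an instance of this API class): the watch semantics described above are typically consumed through a streaming helper rather than by setting watch=True directly.
        >>> w = kubernetes.watch.Watch()
        >>> for event in w.stream(api.list_controller_revision_for_all_namespaces,
        ...                       timeout_seconds=30):
        ...     print(event['type'], event['object'].metadata.name)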
:return: V1beta1ControllerRevisionList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_controller_revision_for_all_namespaces_with_http_info(**kwargs) else: (data) = self.list_controller_revision_for_all_namespaces_with_http_info(**kwargs) return data def list_controller_revision_for_all_namespaces_with_http_info(self, **kwargs): """ list or watch objects of kind ControllerRevision This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_controller_revision_for_all_namespaces_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1beta1ControllerRevisionList If the method is called asynchronously, returns the request thread. """ all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_controller_revision_for_all_namespaces" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/controllerrevisions', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1beta1ControllerRevisionList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), 
_preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_deployment_for_all_namespaces(self, **kwargs): """ list or watch objects of kind Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_deployment_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. 
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: AppsV1beta1DeploymentList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_deployment_for_all_namespaces_with_http_info(**kwargs) else: (data) = self.list_deployment_for_all_namespaces_with_http_info(**kwargs) return data def list_deployment_for_all_namespaces_with_http_info(self, **kwargs): """ list or watch objects of kind Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_deployment_for_all_namespaces_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: AppsV1beta1DeploymentList If the method is called asynchronously, returns the request thread. 
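
        Illustrative pagination sketch using `limit` and `continue` (shown
        against the plain `list_deployment_for_all_namespaces` wrapper; the
        page size and the `metadata._continue` attribute used to read the
        continue token are assumptions for the example, not part of the
        generated description):

        >>> result = api.list_deployment_for_all_namespaces(limit=50)
        >>> while True:
        ...     for item in result.items:
        ...         print(item.metadata.namespace, item.metadata.name)
        ...     if not result.metadata._continue:
        ...         break
        ...     result = api.list_deployment_for_all_namespaces(
        ...         limit=50, _continue=result.metadata._continue)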
""" all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_deployment_for_all_namespaces" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/deployments', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AppsV1beta1DeploymentList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_namespaced_controller_revision(self, namespace, **kwargs): """ list or watch objects of kind ControllerRevision This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_controller_revision(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1beta1ControllerRevisionList If the method is called asynchronously, returns the request thread. 
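
        A minimal usage sketch (the namespace and label selector values are
        placeholders, and `item.revision` assumes the ControllerRevision
        model exposes the revision number under that attribute name):

        >>> revisions = api.list_namespaced_controller_revision(
        ...     'default', label_selector='app=web')
        >>> for item in revisions.items:
        ...     print(item.metadata.name, item.revision)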
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_namespaced_controller_revision_with_http_info(namespace, **kwargs) else: (data) = self.list_namespaced_controller_revision_with_http_info(namespace, **kwargs) return data def list_namespaced_controller_revision_with_http_info(self, namespace, **kwargs): """ list or watch objects of kind ControllerRevision This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_controller_revision_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1beta1ControllerRevisionList If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_namespaced_controller_revision" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_controller_revision`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions', 'GET', path_params, 
query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1beta1ControllerRevisionList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_namespaced_deployment(self, namespace, **kwargs): """ list or watch objects of kind Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_deployment(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: AppsV1beta1DeploymentList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_namespaced_deployment_with_http_info(namespace, **kwargs) else: (data) = self.list_namespaced_deployment_with_http_info(namespace, **kwargs) return data def list_namespaced_deployment_with_http_info(self, namespace, **kwargs): """ list or watch objects of kind Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_deployment_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. 
Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: AppsV1beta1DeploymentList If the method is called asynchronously, returns the request thread. 
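
        Hedged watch sketch (it assumes the companion `kubernetes.watch`
        helper is available alongside this generated client; the namespace
        and timeout values are placeholders):

        >>> from kubernetes import watch
        >>> w = watch.Watch()
        >>> for event in w.stream(api.list_namespaced_deployment,
        ...                       namespace='default', timeout_seconds=60):
        ...     print(event['type'], event['object'].metadata.name)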
""" all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_namespaced_deployment" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_deployment`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AppsV1beta1DeploymentList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_namespaced_stateful_set(self, namespace, **kwargs): """ list or watch objects of kind StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_stateful_set(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. 
:return: V1beta1StatefulSetList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_namespaced_stateful_set_with_http_info(namespace, **kwargs) else: (data) = self.list_namespaced_stateful_set_with_http_info(namespace, **kwargs) return data def list_namespaced_stateful_set_with_http_info(self, namespace, **kwargs): """ list or watch objects of kind StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_stateful_set_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1beta1StatefulSetList If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_namespaced_stateful_set" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_stateful_set`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets', 'GET', path_params, query_params, header_params, 
body=body_params, post_params=form_params, files=local_var_files, response_type='V1beta1StatefulSetList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_stateful_set_for_all_namespaces(self, **kwargs): """ list or watch objects of kind StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_stateful_set_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1beta1StatefulSetList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_stateful_set_for_all_namespaces_with_http_info(**kwargs) else: (data) = self.list_stateful_set_for_all_namespaces_with_http_info(**kwargs) return data def list_stateful_set_for_all_namespaces_with_http_info(self, **kwargs): """ list or watch objects of kind StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_stateful_set_for_all_namespaces_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1beta1StatefulSetList If the method is called asynchronously, returns the request thread. 
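
        Short illustrative call (passing resource_version='0' to accept a
        possibly stale, cache-served answer, as described above; the variable
        name is a placeholder):

        >>> stateful_sets = api.list_stateful_set_for_all_namespaces(resource_version='0')
        >>> print(len(stateful_sets.items))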
""" all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_stateful_set_for_all_namespaces" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/statefulsets', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1beta1StatefulSetList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_controller_revision(self, name, namespace, body, **kwargs): """ partially update the specified ControllerRevision This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_controller_revision(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ControllerRevision (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1ControllerRevision If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_controller_revision_with_http_info(self, name, namespace, body, **kwargs): """ partially update the specified ControllerRevision This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_controller_revision_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ControllerRevision (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1ControllerRevision If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_controller_revision" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_controller_revision`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_controller_revision`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_controller_revision`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions/{name}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, 
files=local_var_files, response_type='V1beta1ControllerRevision', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_deployment(self, name, namespace, body, **kwargs): """ partially update the specified Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_deployment(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Deployment (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: AppsV1beta1Deployment If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_deployment_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_deployment_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_deployment_with_http_info(self, name, namespace, body, **kwargs): """ partially update the specified Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_deployment_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Deployment (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: AppsV1beta1Deployment If the method is called asynchronously, returns the request thread. 
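
        Hedged JSON-Patch-style sketch (the operation list and names are
        illustrative, and the exact shape of the value returned when this raw
        `_with_http_info` variant is called directly depends on the
        api_client, so it is not unpacked here):

        >>> ops = [{'op': 'replace', 'path': '/spec/replicas', 'value': 3}]
        >>> response = api.patch_namespaced_deployment_with_http_info(
        ...     'web', 'default', ops)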
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_deployment" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_deployment`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_deployment`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_deployment`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AppsV1beta1Deployment', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_deployment_scale(self, name, namespace, body, **kwargs): """ partially update scale of the specified Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_deployment_scale(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Scale (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: AppsV1beta1Scale If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_deployment_scale_with_http_info(self, name, namespace, body, **kwargs): """ partially update scale of the specified Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Scale (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: AppsV1beta1Scale If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_deployment_scale" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_deployment_scale`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_deployment_scale`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_deployment_scale`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/scale', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, 
response_type='AppsV1beta1Scale', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_deployment_status(self, name, namespace, body, **kwargs): """ partially update status of the specified Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_deployment_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Deployment (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: AppsV1beta1Deployment If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_deployment_status_with_http_info(self, name, namespace, body, **kwargs): """ partially update status of the specified Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_deployment_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Deployment (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: AppsV1beta1Deployment If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_deployment_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_deployment_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_deployment_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_deployment_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/status', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AppsV1beta1Deployment', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_stateful_set(self, name, namespace, body, **kwargs): """ partially update the specified StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_stateful_set(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StatefulSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1StatefulSet If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_stateful_set_with_http_info(self, name, namespace, body, **kwargs): """ partially update the specified StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_stateful_set_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StatefulSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1StatefulSet If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_stateful_set" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_stateful_set`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_stateful_set`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_stateful_set`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1beta1StatefulSet', auth_settings=auth_settings, 
async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_stateful_set_scale(self, name, namespace, body, **kwargs): """ partially update scale of the specified StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_stateful_set_scale(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Scale (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: AppsV1beta1Scale If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_stateful_set_scale_with_http_info(self, name, namespace, body, **kwargs): """ partially update scale of the specified StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_stateful_set_scale_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Scale (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: AppsV1beta1Scale If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_stateful_set_scale" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_stateful_set_scale`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_stateful_set_scale`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_stateful_set_scale`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/scale', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AppsV1beta1Scale', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_stateful_set_status(self, name, namespace, body, **kwargs): """ partially update status of the specified StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_stateful_set_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StatefulSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1StatefulSet If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_stateful_set_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_stateful_set_status_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_stateful_set_status_with_http_info(self, name, namespace, body, **kwargs): """ partially update status of the specified StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_stateful_set_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StatefulSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1StatefulSet If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_stateful_set_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_stateful_set_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_stateful_set_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_stateful_set_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/status', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, 
files=local_var_files, response_type='V1beta1StatefulSet', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_controller_revision(self, name, namespace, **kwargs): """ read the specified ControllerRevision This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_controller_revision(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ControllerRevision (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1beta1ControllerRevision If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_controller_revision_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_controller_revision_with_http_info(name, namespace, **kwargs) return data def read_namespaced_controller_revision_with_http_info(self, name, namespace, **kwargs): """ read the specified ControllerRevision This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_controller_revision_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ControllerRevision (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1beta1ControllerRevision If the method is called asynchronously, returns the request thread. 
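
        Illustrative example (placeholder names; this assumes the usual
        swagger-codegen behaviour that, when ``_return_http_data_only`` is not
        set, the deserialized object is returned together with the HTTP status
        code and response headers):

        >>> (rev, status, headers) = api.read_namespaced_controller_revision_with_http_info(
        ...     'web-6d5f7c8b9', 'default')
        >>> print(status, rev.revision)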
""" all_params = ['name', 'namespace', 'pretty', 'exact', 'export'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_controller_revision" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_controller_revision`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_controller_revision`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'exact' in params: query_params.append(('exact', params['exact'])) if 'export' in params: query_params.append(('export', params['export'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions/{name}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1beta1ControllerRevision', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_deployment(self, name, namespace, **kwargs): """ read the specified Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_deployment(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Deployment (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: AppsV1beta1Deployment If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_deployment_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_deployment_with_http_info(name, namespace, **kwargs) return data def read_namespaced_deployment_with_http_info(self, name, namespace, **kwargs): """ read the specified Deployment This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_deployment_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Deployment (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: AppsV1beta1Deployment If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty', 'exact', 'export'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_deployment" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_deployment`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_deployment`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'exact' in params: query_params.append(('exact', params['exact'])) if 'export' in params: query_params.append(('export', params['export'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AppsV1beta1Deployment', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_deployment_scale(self, name, namespace, **kwargs): """ read scale of the specified Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_deployment_scale(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Scale (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. 
:return: AppsV1beta1Scale If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_deployment_scale_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_deployment_scale_with_http_info(name, namespace, **kwargs) return data def read_namespaced_deployment_scale_with_http_info(self, name, namespace, **kwargs): """ read scale of the specified Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_deployment_scale_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Scale (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: AppsV1beta1Scale If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_deployment_scale" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_deployment_scale`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_deployment_scale`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/scale', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AppsV1beta1Scale', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_deployment_status(self, name, namespace, **kwargs): """ read status of the specified Deployment This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_deployment_status(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Deployment (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: AppsV1beta1Deployment If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_deployment_status_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_deployment_status_with_http_info(name, namespace, **kwargs) return data def read_namespaced_deployment_status_with_http_info(self, name, namespace, **kwargs): """ read status of the specified Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_deployment_status_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Deployment (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: AppsV1beta1Deployment If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_deployment_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_deployment_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_deployment_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/status', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AppsV1beta1Deployment', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), 
collection_formats=collection_formats) def read_namespaced_stateful_set(self, name, namespace, **kwargs): """ read the specified StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_stateful_set(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StatefulSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1beta1StatefulSet If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_stateful_set_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_stateful_set_with_http_info(name, namespace, **kwargs) return data def read_namespaced_stateful_set_with_http_info(self, name, namespace, **kwargs): """ read the specified StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_stateful_set_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StatefulSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1beta1StatefulSet If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'pretty', 'exact', 'export'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_stateful_set" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_stateful_set`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_stateful_set`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'exact' in params: query_params.append(('exact', params['exact'])) if 'export' in params: query_params.append(('export', params['export'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1beta1StatefulSet', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_stateful_set_scale(self, name, namespace, **kwargs): """ read scale of the specified StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_stateful_set_scale(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Scale (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: AppsV1beta1Scale If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_stateful_set_scale_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_stateful_set_scale_with_http_info(name, namespace, **kwargs) return data def read_namespaced_stateful_set_scale_with_http_info(self, name, namespace, **kwargs): """ read scale of the specified StatefulSet This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_stateful_set_scale_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Scale (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: AppsV1beta1Scale If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_stateful_set_scale" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_stateful_set_scale`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_stateful_set_scale`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/scale', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AppsV1beta1Scale', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_stateful_set_status(self, name, namespace, **kwargs): """ read status of the specified StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_stateful_set_status(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StatefulSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1beta1StatefulSet If the method is called asynchronously, returns the request thread. 
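
        Illustrative example (placeholder names; ``api`` is assumed to be a
        configured instance of this API class):

        >>> sts = api.read_namespaced_stateful_set_status('my-statefulset', 'default')
        >>> print(sts.status.ready_replicas)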
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_stateful_set_status_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_stateful_set_status_with_http_info(name, namespace, **kwargs) return data def read_namespaced_stateful_set_status_with_http_info(self, name, namespace, **kwargs): """ read status of the specified StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_stateful_set_status_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StatefulSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1beta1StatefulSet If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_stateful_set_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_stateful_set_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_stateful_set_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/status', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1beta1StatefulSet', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def replace_namespaced_controller_revision(self, name, namespace, body, **kwargs): """ replace the specified ControllerRevision This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_controller_revision(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ControllerRevision (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1beta1ControllerRevision body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1ControllerRevision If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_controller_revision_with_http_info(self, name, namespace, body, **kwargs): """ replace the specified ControllerRevision This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_controller_revision_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ControllerRevision (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1beta1ControllerRevision body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1ControllerRevision If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_controller_revision" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_controller_revision`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_controller_revision`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_controller_revision`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions/{name}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1beta1ControllerRevision', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def replace_namespaced_deployment(self, name, namespace, body, **kwargs): """ replace the specified Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_deployment(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Deployment (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param AppsV1beta1Deployment body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: AppsV1beta1Deployment If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_deployment_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_deployment_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_deployment_with_http_info(self, name, namespace, body, **kwargs): """ replace the specified Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_deployment_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Deployment (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param AppsV1beta1Deployment body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: AppsV1beta1Deployment If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_deployment" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_deployment`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_deployment`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_deployment`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AppsV1beta1Deployment', auth_settings=auth_settings, async_req=params.get('async_req'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def replace_namespaced_deployment_scale(self, name, namespace, body, **kwargs): """ replace scale of the specified Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_deployment_scale(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Scale (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param AppsV1beta1Scale body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: AppsV1beta1Scale If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_deployment_scale_with_http_info(self, name, namespace, body, **kwargs): """ replace scale of the specified Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_deployment_scale_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Scale (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param AppsV1beta1Scale body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: AppsV1beta1Scale If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_deployment_scale" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_deployment_scale`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_deployment_scale`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_deployment_scale`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/scale', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AppsV1beta1Scale', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def replace_namespaced_deployment_status(self, name, namespace, body, **kwargs): """ replace status of the specified Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_deployment_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Deployment (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param AppsV1beta1Deployment body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: AppsV1beta1Deployment If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_deployment_status_with_http_info(self, name, namespace, body, **kwargs): """ replace status of the specified Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_deployment_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Deployment (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param AppsV1beta1Deployment body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: AppsV1beta1Deployment If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_deployment_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_deployment_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_deployment_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_deployment_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/status', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AppsV1beta1Deployment', auth_settings=auth_settings, 
async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def replace_namespaced_stateful_set(self, name, namespace, body, **kwargs): """ replace the specified StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_stateful_set(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StatefulSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1beta1StatefulSet body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1StatefulSet If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_stateful_set_with_http_info(self, name, namespace, body, **kwargs): """ replace the specified StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_stateful_set_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StatefulSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1beta1StatefulSet body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1StatefulSet If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_stateful_set" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_stateful_set`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_stateful_set`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_stateful_set`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1beta1StatefulSet', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def replace_namespaced_stateful_set_scale(self, name, namespace, body, **kwargs): """ replace scale of the specified StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_stateful_set_scale(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Scale (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param AppsV1beta1Scale body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: AppsV1beta1Scale If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_stateful_set_scale_with_http_info(self, name, namespace, body, **kwargs): """ replace scale of the specified StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_stateful_set_scale_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Scale (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param AppsV1beta1Scale body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: AppsV1beta1Scale If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_stateful_set_scale" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_stateful_set_scale`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_stateful_set_scale`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_stateful_set_scale`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/scale', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AppsV1beta1Scale', auth_settings=auth_settings, 
async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def replace_namespaced_stateful_set_status(self, name, namespace, body, **kwargs): """ replace status of the specified StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_stateful_set_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StatefulSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1beta1StatefulSet body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1StatefulSet If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_stateful_set_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_stateful_set_status_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_stateful_set_status_with_http_info(self, name, namespace, body, **kwargs): """ replace status of the specified StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_stateful_set_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StatefulSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1beta1StatefulSet body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1StatefulSet If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_stateful_set_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_stateful_set_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_stateful_set_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_stateful_set_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/status', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1beta1StatefulSet', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
metrics.go
package srvgrpc

import (
	"context"
	"time"

	"github.com/go-kit/kit/metrics"
	"github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"
)

// MetricsModule exposes prometheus metrics. It only provides a simple
// integration; for more advanced use, refer to
// github.com/grpc-ecosystem/go-grpc-prometheus.
//
// The grpc.Server must be constructed with the prometheus interceptors:
// 	opts := []grpc.ServerOption{
//		grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
//		grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
//	}
//	server = grpc.NewServer(opts...)
type MetricsModule struct{}

// ProvideGRPC implements container.GRPCProvider.
func (m MetricsModule) ProvideGRPC(server *grpc.Server) {
	grpc_prometheus.Register(server)
}

// Metrics returns a unary server interceptor for the grpc package. It records the request duration in a histogram.
func
(metrics *RequestDurationSeconds) grpc.UnaryServerInterceptor { return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { start := time.Now() defer func() { metrics.Route(info.FullMethod).Observe(time.Since(start).Seconds()) }() return handler(ctx, req) } } // RequestDurationSeconds is a wrapper around a histogram that measures the // request latency. The RequestDurationSeconds exposes label setters such as // module, service and route. If a label is set more than once, the one set last // will take precedence. type RequestDurationSeconds struct { // histogram is the underlying histogram of RequestDurationSeconds. histogram metrics.Histogram // labels module string service string route string } // NewRequestDurationSeconds returns a new RequestDurationSeconds instance. func NewRequestDurationSeconds(histogram metrics.Histogram) *RequestDurationSeconds { return &RequestDurationSeconds{ histogram: histogram, module: "unknown", service: "unknown", route: "unknown", } } // Module specifies the module label for RequestDurationSeconds. func (r *RequestDurationSeconds) Module(module string) *RequestDurationSeconds { return &RequestDurationSeconds{ histogram: r.histogram, module: module, service: r.service, route: r.route, } } // Service specifies the service label for RequestDurationSeconds. func (r *RequestDurationSeconds) Service(service string) *RequestDurationSeconds { return &RequestDurationSeconds{ histogram: r.histogram, module: r.module, service: service, route: r.route, } } // Route specifies the method label for RequestDurationSeconds. func (r *RequestDurationSeconds) Route(route string) *RequestDurationSeconds { return &RequestDurationSeconds{ histogram: r.histogram, module: r.module, service: r.service, route: route, } } // Observe records the time taken to process the request. func (r RequestDurationSeconds) Observe(seconds float64) { r.histogram.With("module", r.module, "service", r.service, "route", r.route).Observe(seconds) }
Metrics
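// --- Usage sketch, not part of the original file above ---
// One possible way to wire MetricsModule and the Metrics interceptor together,
// assuming the function above is named Metrics as shown. The metric name, the
// "example"/"srvgrpc" label values and the function name exampleWireMetrics are
// placeholders, and two extra imports are assumed:
//
//	kitprometheus "github.com/go-kit/kit/metrics/prometheus"
//	stdprometheus "github.com/prometheus/client_golang/prometheus"
func exampleWireMetrics() *grpc.Server {
	// The histogram label names must match the labels RequestDurationSeconds.Observe sets.
	hist := kitprometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
		Name: "request_duration_seconds",
		Help: "Duration of gRPC requests in seconds.",
	}, []string{"module", "service", "route"})

	duration := NewRequestDurationSeconds(hist).Module("example").Service("srvgrpc")

	server := grpc.NewServer(
		grpc.ChainUnaryInterceptor(
			grpc_prometheus.UnaryServerInterceptor, // go-grpc-prometheus request counters
			Metrics(duration),                      // request duration histogram from this file
		),
	)
	MetricsModule{}.ProvideGRPC(server)
	return server
}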
remote_indexer.go
/* Copyright 2020 The Magma Authors. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package indexer import ( "context" "strings" state_protos "magma/orc8r/cloud/go/services/state/protos" state_types "magma/orc8r/cloud/go/services/state/types" merrors "magma/orc8r/lib/go/errors" "magma/orc8r/lib/go/registry" "github.com/golang/glog" ) // remoteIndexer identifies a remote state indexer. // The remote indexer's fields are cached at the state service. type remoteIndexer struct { // service name of the indexer // should always be lowercase to match service registry convention service string // version of the indexer version Version // types is the types of state the indexer should receive types []string } // NewRemoteIndexer returns an indexer that forwards its methods to the // remote indexer servicer. func
(serviceName string, version Version, types ...string) Indexer { return &remoteIndexer{service: strings.ToLower(serviceName), version: version, types: types} } func (r *remoteIndexer) GetID() string { return r.service } func (r *remoteIndexer) GetVersion() Version { return r.version } func (r *remoteIndexer) GetTypes() []string { return r.types } func (r *remoteIndexer) PrepareReindex(from, to Version, isFirstReindex bool) error { c, err := r.getIndexerClient() if err != nil { return err } _, err = c.PrepareReindex(context.Background(), &state_protos.PrepareReindexRequest{ IndexerId: r.service, FromVersion: uint32(from), ToVersion: uint32(to), IsFirst: isFirstReindex, }) return err } func (r *remoteIndexer) CompleteReindex(from, to Version) error { c, err := r.getIndexerClient() if err != nil { return err } _, err = c.CompleteReindex(context.Background(), &state_protos.CompleteReindexRequest{ IndexerId: r.service, FromVersion: uint32(from), ToVersion: uint32(to), }) return err } func (r *remoteIndexer) Index(networkID string, states state_types.SerializedStatesByID) (state_types.StateErrors, error) { if len(states) == 0 { return nil, nil } var reporterHWID string for _, st := range states { reporterHWID = st.ReporterID break } c, err := r.getIndexerClient() if err != nil { return nil, err } pStates, err := state_types.MakeProtoStates(states) if err != nil { return nil, err } res, err := c.Index(context.Background(), &state_protos.IndexRequest{ States: pStates, NetworkId: networkID, ReporterHwid: reporterHWID, }) if err != nil { return nil, err } return state_types.MakeStateErrors(res.StateErrors), nil } func (r *remoteIndexer) getIndexerClient() (state_protos.IndexerClient, error) { conn, err := registry.GetConnection(r.service) if err != nil { initErr := merrors.NewInitError(err, r.service) glog.Error(initErr) return nil, initErr } return state_protos.NewIndexerClient(conn), nil }
NewRemoteIndexer
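// --- Usage sketch, not part of the original file above ---
// A minimal, hypothetical reindex-and-index flow using the constructor above.
// The service name and state type are placeholders, and it assumes the Indexer
// interface exposes the PrepareReindex/Index/CompleteReindex methods implemented
// by remoteIndexer.
func exampleRemoteIndexer(networkID string, states state_types.SerializedStatesByID) error {
	idx := NewRemoteIndexer("my_indexer_service", 1, "directory_record")
	if err := idx.PrepareReindex(0, idx.GetVersion(), true); err != nil {
		return err
	}
	if _, err := idx.Index(networkID, states); err != nil {
		return err
	}
	return idx.CompleteReindex(0, idx.GetVersion())
}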
test_fixture1.py
from selenium import webdriver link = "http://selenium1py.pythonanywhere.com/" class TestMainPage1(): @classmethod def setup_class(self): print("\nstart browser for test suite..") self.browser = webdriver.Chrome() @classmethod def teardown_class(self): print("quit browser for test suite..") self.browser.quit() def test_guest_should_see_login_link(self): self.browser.get(link) self.browser.find_element_by_css_selector("#login_link") def test_guest_should_see_basket_link_on_the_main_page(self): self.browser.get(link) self.browser.find_element_by_css_selector( ".basket-mini .btn-group > a") class
(): def setup_method(self): print("start browser for test..") self.browser = webdriver.Chrome() def teardown_method(self): print("quit browser for test..") self.browser.quit() def test_guest_should_see_login_link(self): self.browser.get(link) self.browser.find_element_by_css_selector("#login_link") def test_guest_should_see_basket_link_on_the_main_page(self): self.browser.get(link) self.browser.find_element_by_css_selector( ".basket-mini .btn-group > a")
TestMainPage2
service.py
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # http://www.apache.org/licenses/LICENSE-2.0 # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. """ CustomService class definitions """ import logging import time from builtins import str import mms from mms.context import Context, RequestProcessor from mms.metrics.metrics_store import MetricsStore from mms.protocol.otf_message_handler import create_predict_response PREDICTION_METRIC = 'PredictionTime' logger = logging.getLogger(__name__) class Service(object): """ Wrapper for custom entry_point """ def __init__(self, model_name, model_dir, manifest, entry_point, gpu, batch_size): self._context = Context(model_name, model_dir, manifest, batch_size, gpu, mms.__version__) self._entry_point = entry_point @property def
(self): return self._context @staticmethod def retrieve_data_for_inference(batch): """ REQUEST_INPUT = { "requestId" : "111-222-3333", "parameters" : [ PARAMETER ] } PARAMETER = { "name" : parameter name "contentType": "http-content-types", "value": "val1" } :param batch: :return: """ if batch is None: raise ValueError("Received invalid inputs") req_to_id_map = {} headers = dict() input_batch = [] for batch_idx, request_batch in enumerate(batch): req_id = request_batch.get('requestId').decode("utf-8") parameters = request_batch['parameters'] model_in_headers = dict() model_in = dict() for parameter in parameters: model_in.update({parameter["name"]: parameter["value"]}) model_in_headers.update({parameter["name"]: {"content-type": parameter["contentType"]}}) headers.update({req_id: model_in_headers}) input_batch.append(model_in) req_to_id_map[batch_idx] = req_id return headers, input_batch, req_to_id_map def predict(self, batch): """ PREDICT COMMAND = { "command": "predict", "batch": [ REQUEST_INPUT ] } :param batch: list of request :return: """ headers, input_batch, req_id_map = Service.retrieve_data_for_inference(batch) self.context.request_ids = req_id_map self.context.request_processor = RequestProcessor(headers) metrics = MetricsStore(req_id_map, self.context.model_name) self.context.metrics = metrics start_time = time.time() # noinspection PyBroadException try: ret = self._entry_point(input_batch, self.context) except Exception: # pylint: disable=broad-except logger.warning("Invoking custom service failed.", exc_info=True) return create_predict_response(None, req_id_map, "Prediction failed", 503) if not isinstance(ret, list): logger.warning("model: %s, Invalid return type: %s.", self.context.model_name, type(ret)) return create_predict_response(None, req_id_map, "Invalid model predict output", 503) if len(ret) != len(input_batch): logger.warning("model: %s, number of batch response mismatched, expect: %d, got: %d.", self.context.model_name, len(input_batch), len(ret)) return create_predict_response(None, req_id_map, "number of batch response mismatched", 503) duration = round((time.time() - start_time) * 1000, 2) metrics.add_time(PREDICTION_METRIC, duration) return create_predict_response(ret, req_id_map, "Prediction success", 200, context=self.context) def emit_metrics(metrics): """ Emit the metrics in the provided Dictionary Parameters ---------- metrics: Dictionary A dictionary of all metrics, when key is metric_name value is a metric object """ if metrics: for met in metrics: logger.info("[METRICS]%s", str(met))
context
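# --- Usage sketch, not part of the original module above ---
# A minimal, hypothetical batch in the REQUEST_INPUT shape documented in
# retrieve_data_for_inference. Note that requestId arrives as bytes (the method
# decodes it with utf-8); the request id, parameter name and value below are
# placeholders, and the expected outputs are derived from reading the method.
if __name__ == "__main__":
    sample_batch = [
        {
            "requestId": b"111-222-3333",
            "parameters": [
                {"name": "body", "contentType": "application/json", "value": '{"x": 1}'},
            ],
        },
    ]
    headers, input_batch, req_to_id_map = Service.retrieve_data_for_inference(sample_batch)
    # headers       -> {"111-222-3333": {"body": {"content-type": "application/json"}}}
    # input_batch   -> [{"body": '{"x": 1}'}]
    # req_to_id_map -> {0: "111-222-3333"}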
signup_board.rs
use crate::embeds::CrossroadsEmbeds; use crate::{data, data::SignupBoardData, db, interactions, logging::LogTrace}; use anyhow::Result; use chrono::NaiveDate; use itertools::Itertools; use serenity::builder::CreateEmbed; use serenity::{model::prelude::*, prelude::*}; use serenity_tools::builder::CreateEmbedExt; use std::{mem, sync::Arc}; const OVERVIEW_CHANNEL_ID: &str = "overview_channel_id"; const OVERVIEW_MESSAGE_ID: &str = "overview_message_id"; const CROSS_EMOJI: char = '❌'; const RUNNING_EMOJI: char = '🏃'; const GREEN_CIRCLE_EMOJI: char = '🟢'; const CONSTRUCTION_SITE_EMOJI: char = '🚧'; const LOCK_EMOJI: char = '🔒'; // Hold on to often used values pub struct SignupBoard { pub overview_channel_id: Option<ChannelId>, pub overview_message_id: Option<MessageId>, } #[derive(Debug)] pub enum SignupBoardError { OverviewMessageNotSet, OverviewChannelNotSet, ChannelNotFound(ChannelId), } impl std::fmt::Display for SignupBoardError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::ChannelNotFound(id) => { write!(f, "Channel with id: {} not found on Signupboard", id) } Self::OverviewMessageNotSet => write!(f, "Overview message not set"), Self::OverviewChannelNotSet => write!(f, "Overview channel not set"), } } } impl std::error::Error for SignupBoardError {} // loads the main guild id, that is always set async fn load_guild_id(ctx: &Context) -> Result<GuildId> { // Load guild id provided on startup let guild_id = ctx .data .read() .await .get::<data::ConfigValuesData>() .unwrap() .main_guild_id; Ok(guild_id) } pub(crate) fn title_sort_value(t: &db::Raid) -> u64 { if t.title.contains("Beginner") { return 10; } if t.title.contains("Intermediate") { return 8; } if t.title.contains("Practice") { return 6; } 0 } impl SignupBoard { // get a lock on the SignupBoardConfig pub async fn get(ctx: &Context) -> Arc<RwLock<SignupBoard>> { ctx.data .read() .await .get::<SignupBoardData>() .unwrap() .clone() } pub async fn load_from_db(&mut self, ctx: &Context) -> Result<()> { let new_board = SignupBoard { overview_channel_id: match db::Config::load(ctx, OVERVIEW_CHANNEL_ID.to_string()).await { Ok(conf) => Some(conf.value.parse::<ChannelId>()?), Err(diesel::NotFound) => None, Err(e) => return Err(e.into()), }, overview_message_id: match db::Config::load(ctx, OVERVIEW_MESSAGE_ID.to_string()).await { Ok(conf) => Some(conf.value.parse::<u64>()?.into()), Err(diesel::NotFound) => None, Err(e) => return Err(e.into()), }, }; // overwrite at once and not value by value let _ = mem::replace(self, new_board); Ok(()) } pub async fn save_to_db(&self, ctx: &Context) -> Result<()> { if let Some(oci) = self.overview_channel_id { db::Config { name: OVERVIEW_CHANNEL_ID.to_string(), value: oci.to_string(), } .save(ctx) .await?; } if let Some(omi) = self.overview_message_id { db::Config { name: OVERVIEW_MESSAGE_ID.to_string(), value: omi.to_string(), } .save(ctx) .await?; } Ok(()) } /// Saves the channel to be used for the overview message pub async fn set_channel( &mut self, ctx: &Context, chan: ChannelId, trace: LogTrace, ) -> Result<()> { trace.step("Looking for channel in guild"); let gid = load_guild_id(ctx).await?; let channels = gid.channels(ctx).await?; if let Some(channel) = channels.get(&chan) { trace.step("Found. 
Setting new channel internally"); self.overview_channel_id = Some(channel.id); } else { return Err(SignupBoardError::ChannelNotFound(chan).into()); } Ok(()) } /// Creates the message for the overview and saves the message id internally pub async fn create_overview(&mut self, ctx: &Context, trace: LogTrace) -> Result<()> { trace.step("Loading channel for overview"); let chan = match self.overview_channel_id { Some(c) => c, None => return Err(SignupBoardError::OverviewChannelNotSet.into()), }; trace.step("Writing initial message to overview"); let msg = chan .send_message(ctx, |m| { m.set_embed(CreateEmbed::info_box("Setting up overview message")) }) .await?; trace.step("Setting new message internally"); self.overview_message_id = Some(msg.id); Ok(()) } /// Loads all relevant raid(s) from the db and updates the overview message pub async fn update_overview(&self, ctx: &Context, trace: LogTrace) -> Result<()> { trac
e.step("Loading overview information"); let msg = match self.overview_message_id { Some(m) => m, None => return Err(SignupBoardError::OverviewMessageNotSet.into()), }; let chan = match self.overview_channel_id { Some(c) => c, None => return Err(SignupBoardError::OverviewChannelNotSet.into()), }; trace.step("Loading raid(s)"); let active_raids = db::Raid::all_active(ctx).await?; struct TierInfo { _tier: db::Tier, discord: Vec<RoleId>, } struct RaidInfo { raid: db::Raid, signup_count: i64, tier_info: Option<TierInfo>, bosses: Vec<db::RaidBoss>, } trace.step("Loading additional traning info"); let mut raids: Vec<RaidInfo> = Vec::new(); for raid in active_raids { let signup_count = raid.get_signup_count(ctx).await?; let tier = raid.get_tier(ctx).await.transpose()?; let tier_info = if let Some(_tier) = tier { let discord = _tier .get_discord_roles(ctx) .await? .into_iter() .map(|t| RoleId::from(t.discord_role_id as u64)) .collect::<Vec<_>>(); Some(TierInfo { _tier, discord }) } else { None }; let mut bosses = raid.all_raid_bosses(ctx).await?; bosses.sort_by_key(|b| b.position); bosses.sort_by_key(|b| b.wing); raids.push(RaidInfo { raid, signup_count, tier_info, bosses, }); } // Sort by custom names and dates raids.sort_by(|a, b| title_sort_value(&b.raid).cmp(&title_sort_value(&a.raid))); raids.sort_by(|a, b| a.raid.date.date().cmp(&b.raid.date.date())); let mut _groups: Vec<(NaiveDate, Vec<&RaidInfo>)> = Vec::new(); for (d, v) in raids .iter() .group_by(|t| t.raid.date.date()) .into_iter() { _groups.push((d, v.collect())); } let mut groups: Vec<(NaiveDate, Vec<&RaidInfo>, usize)> = Vec::with_capacity(_groups.len()); for (d, v) in _groups { // FIXME do this without extra db access let mut total_users = db::User::by_signed_up_and_date(ctx, d).await?; total_users.sort_by_key(|u| u.id); total_users.dedup_by_key(|u| u.id); groups.push((d, v, total_users.len())); } let base_emb = CreateEmbed::xdefault(); trace.step("Updating overview message"); chan.edit_message(ctx, msg, |m| { m.add_embed(|e| { e.0 = base_emb.0.clone(); e.title("Sign up for a raid"); e.field( "How to", "\ Before you can sign up you have to be __registered__. \ To do so simply use the `/register` command in any channel you have write permissions in.\n\n\ To **sign up**, **sign out** or to **edit** your sign-up click the button at the end of the message", false); e.field( "Legend", format!( "{} => {}\n{} => {}\n{} => {}", GREEN_CIRCLE_EMOJI, "You can join this raid or edit/remove your sign-up", LOCK_EMOJI, "The raid is locked. 
Most likely squadmaking is in progress", RUNNING_EMOJI, "The raid is currently ongoing" ), false); e.footer(|f| f.text("Last update")); e.timestamp(&chrono::Utc::now()) }); for (date, raids, total) in groups { m.add_embed(|e| { e.0 = base_emb.0.clone(); e.title(date.format("__**%A**, %v__")); e.description(&format!("Total sign-up count: {}", total)); for t in raids { let mut details = format!("` Time ` <t:{}:t>", t.raid.date.timestamp()); if let Some(tier) = &t.tier_info { details.push_str(&format!("\n`Tier required` {}", tier.discord.iter().map(|d| Mention::from(*d)).join(" "))); } else { details.push_str("\n`Tier required` None"); } details.push_str(&format!("\n`Sign-up count` {}", t.signup_count)); match t.bosses.len() { 0 => (), 1 => details.push_str("\n` Boss ` "), _ => details.push_str("\n` Boss Pool ` "), } let boss_emojis = t.bosses .iter() .map(|b| Mention::from(EmojiId::from(b.emoji as u64)).to_string()) .collect::<Vec<_>>() .join(" "); details.push_str(&boss_emojis); e.field( format!( "{} **{}**", match t.raid.state { db::RaidState::Created => CONSTRUCTION_SITE_EMOJI, db::RaidState::Open => GREEN_CIRCLE_EMOJI, db::RaidState::Closed => LOCK_EMOJI, db::RaidState::Started => RUNNING_EMOJI, db::RaidState::Finished => CROSS_EMOJI, }, &t.raid.title), details, false ); } e }); } m.components(|c| { if !raids.is_empty() { c.add_action_row(interactions::overview_action_row()); } c }); m }).await?; Ok(()) } }
para_averaging.py
import torch.nn.functional as F import torch import para_model
class ParaAvgModel(para_model.PARAModel): def __init__(self, **args): super().__init__(**args) # self.drop_layer=torch.nn.Dropout(p=0.2) self.cls_layer=torch.nn.Linear(self.bert.config.hidden_size*5, args['num_classes']) def forward(self, batch): input_ids = batch['input_ids'] token_type_ids = batch['token_type_ids'] attention_mask = batch['attention_mask'] cls_mask = batch['cls_mask'] sep1_mask = batch['sep1_mask'] sep2_mask = batch['sep2_mask'] left_mask = batch['left_mask'] right_mask = batch['right_mask'] enc = self.bert(input_ids=input_ids,attention_mask=attention_mask,token_type_ids=token_type_ids)[0] #BxS_LENxSIZE; BxSIZE cls = (enc*cls_mask.unsqueeze(-1)).sum(1) # enc.pooler_output sep1 = (enc*sep1_mask.unsqueeze(-1)).sum(1) sep2 = (enc*sep2_mask.unsqueeze(-1)).sum(1) left = (enc*left_mask.unsqueeze(-1)).sum(1) / left_mask.sum(-1).unsqueeze(-1) right = (enc*right_mask.unsqueeze(-1)).sum(1) / right_mask.sum(-1).unsqueeze(-1) catenated = torch.cat((cls, sep1, sep2, left, right), -1) # dropped = self.drop_layer(catenated) return self.cls_layer(catenated)
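# --- Shape sketch, not part of the original module above ---
# forward() expects a batch dict of tensors shaped roughly as below
# (B = batch size, S = sequence length). The mask tensors select the [CLS]
# token, the two [SEP] tokens and the left/right paraphrase segments; the
# left/right vectors are mask-weighted averages, the rest are single positions.
# Sizes here are illustrative assumptions, not taken from the original code.
#
#   batch = {
#       "input_ids":      LongTensor (B, S),
#       "token_type_ids": LongTensor (B, S),
#       "attention_mask": LongTensor (B, S),
#       "cls_mask":       FloatTensor (B, S),  # 1.0 at the [CLS] position
#       "sep1_mask":      FloatTensor (B, S),  # 1.0 at the first [SEP]
#       "sep2_mask":      FloatTensor (B, S),  # 1.0 at the second [SEP]
#       "left_mask":      FloatTensor (B, S),  # 1.0 over the left text
#       "right_mask":     FloatTensor (B, S),  # 1.0 over the right text
#   }
#   logits = model(batch)  # (B, num_classes), from the 5 concatenated vectors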
scrape.rs
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::time::{Duration, Instant}; use crossbeam_channel::{Receiver, Sender}; use indicatif::ProgressIterator; use rand::Rng; use rand_distr::Pareto; use aquatic_udp::common::handlers::*; use aquatic_udp::common::*; use aquatic_udp::config::Config; use crate::common::*; use crate::config::BenchConfig; pub fn bench_scrape_handler( bench_config: &BenchConfig, aquatic_config: &Config, request_sender: &Sender<(ConnectedRequest, SocketAddr)>, response_receiver: &Receiver<(ConnectedResponse, SocketAddr)>, rng: &mut impl Rng, info_hashes: &[InfoHash], ) -> (usize, Duration) { let requests = create_requests( rng, info_hashes, bench_config.num_scrape_requests, bench_config.num_hashes_per_scrape_request, ); let p = aquatic_config.handlers.max_requests_per_iter * bench_config.num_threads; let mut num_responses = 0usize; let mut dummy: i32 = rng.gen(); let pb = create_progress_bar("Scrape", bench_config.num_rounds as u64); // Start benchmark let before = Instant::now(); for round in (0..bench_config.num_rounds).progress_with(pb) { for request_chunk in requests.chunks(p) { for (request, src) in request_chunk { let request = ConnectedRequest::Scrape { request: request.clone(), original_indices: Vec::new(), }; request_sender.send((request, *src)).unwrap(); } while let Ok((ConnectedResponse::Scrape { response, .. }, _)) = response_receiver.try_recv() { num_responses += 1; if let Some(stat) = response.torrent_stats.last() { dummy ^= stat.leechers.0; } } } let total = bench_config.num_scrape_requests * (round + 1); while num_responses < total { if let Ok((ConnectedResponse::Scrape { response, .. }, _)) = response_receiver.recv() { num_responses += 1; if let Some(stat) = response.torrent_stats.last() { dummy ^= stat.leechers.0; } } } } let elapsed = before.elapsed(); if dummy == 0 { println!("dummy dummy"); } (num_responses, elapsed) } pub fn
( rng: &mut impl Rng, info_hashes: &[InfoHash], number: usize, hashes_per_request: usize, ) -> Vec<(ScrapeRequest, SocketAddr)> { let pareto = Pareto::new(1., PARETO_SHAPE).unwrap(); let max_index = info_hashes.len() - 1; let mut requests = Vec::new(); for _ in 0..number { let mut request_info_hashes = Vec::new(); for _ in 0..hashes_per_request { let info_hash_index = pareto_usize(rng, pareto, max_index); request_info_hashes.push(info_hashes[info_hash_index]) } let request = ScrapeRequest { connection_id: ConnectionId(0), transaction_id: TransactionId(rng.gen()), info_hashes: request_info_hashes, }; requests.push(( request, SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 1)), )); } requests }
create_requests
payment_announcement_bar.tsx
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. // See LICENSE.txt for license information. import React from 'react'; import {isEmpty} from 'lodash'; import {CloudCustomer, Subscription} from 'mattermost-redux/types/cloud'; import {browserHistory} from 'utils/browser_history'; import {isCustomerCardExpired} from 'utils/cloud_utils'; import {AnnouncementBarTypes} from 'utils/constants'; import {t} from 'utils/i18n'; import AnnouncementBar from '../default_announcement_bar'; import withGetCloudSubscription from '../../common/hocs/cloud/with_get_cloud_subscription'; type Props = { userIsAdmin: boolean; isCloud: boolean; subscription?: Subscription; customer?: CloudCustomer; actions: { getCloudSubscription: () => void; getCloudCustomer: () => void;
}; }; class PaymentAnnouncementBar extends React.PureComponent<Props> { async componentDidMount() { if (isEmpty(this.props.customer)) { await this.props.actions.getCloudCustomer(); } } isMostRecentPaymentFailed = () => { return this.props.subscription?.last_invoice?.status === 'failed'; } shouldShowBanner = () => { const {userIsAdmin, isCloud, subscription} = this.props; // Prevents banner flashes if the subscription hasn't been loaded yet if (subscription === null) { return false; } if (subscription?.is_paid_tier !== 'true') { return false; } if (!isCloud) { return false; } if (!userIsAdmin) { return false; } if (!isCustomerCardExpired(this.props.customer) && !this.isMostRecentPaymentFailed()) { return false; } return true; } updatePaymentInfo = () => { browserHistory.push('/admin_console/billing/payment_info'); } render() { if (isEmpty(this.props.customer) || isEmpty(this.props.subscription)) { return null; } if (!this.shouldShowBanner()) { return null; } return ( <AnnouncementBar type={AnnouncementBarTypes.CRITICAL} showCloseButton={false} onButtonClick={this.updatePaymentInfo} modalButtonText={t('admin.billing.subscription.updatePaymentInfo')} modalButtonDefaultText={'Update payment info'} message={this.isMostRecentPaymentFailed() ? t('admin.billing.subscription.mostRecentPaymentFailed') : t('admin.billing.subscription.creditCardExpired')} showLinkAsButton={true} isTallBanner={true} /> ); } } export default withGetCloudSubscription(PaymentAnnouncementBar);
add_vectors_native.rs
/// Adds two vectors /// /// ```text /// w := alpha * u + beta * v /// ``` /// /// # Note /// /// IMPORTANT: the vectors must have the same size /// /// This function does NOT check for the dimensions of the arguments /// #[inline] pub fn add_vectors_native(w: &mut [f64], alpha: f64, u: &[f64], beta: f64, v: &[f64]) { let n = w.len(); if n == 0 { return; } if n == 1 { w[0] = alpha * u[0] + beta * v[0]; return; } if n == 2 { w[0] = alpha * u[0] + beta * v[0]; w[1] = alpha * u[1] + beta * v[1]; return; } if n == 3 { w[0] = alpha * u[0] + beta * v[0]; w[1] = alpha * u[1] + beta * v[1]; w[2] = alpha * u[2] + beta * v[2]; return; } if n == 4 { w[0] = alpha * u[0] + beta * v[0]; w[1] = alpha * u[1] + beta * v[1]; w[2] = alpha * u[2] + beta * v[2]; w[3] = alpha * u[3] + beta * v[3]; return; } if n == 5 { w[0] = alpha * u[0] + beta * v[0]; w[1] = alpha * u[1] + beta * v[1]; w[2] = alpha * u[2] + beta * v[2]; w[3] = alpha * u[3] + beta * v[3]; w[4] = alpha * u[4] + beta * v[4]; return; } if n == 6 { w[0] = alpha * u[0] + beta * v[0]; w[1] = alpha * u[1] + beta * v[1]; w[2] = alpha * u[2] + beta * v[2]; w[3] = alpha * u[3] + beta * v[3]; w[4] = alpha * u[4] + beta * v[4]; w[5] = alpha * u[5] + beta * v[5]; return; } if n == 7 { w[0] = alpha * u[0] + beta * v[0]; w[1] = alpha * u[1] + beta * v[1]; w[2] = alpha * u[2] + beta * v[2]; w[3] = alpha * u[3] + beta * v[3]; w[4] = alpha * u[4] + beta * v[4]; w[5] = alpha * u[5] + beta * v[5]; w[6] = alpha * u[6] + beta * v[6]; return; } if n == 8 { w[0] = alpha * u[0] + beta * v[0]; w[1] = alpha * u[1] + beta * v[1]; w[2] = alpha * u[2] + beta * v[2]; w[3] = alpha * u[3] + beta * v[3]; w[4] = alpha * u[4] + beta * v[4]; w[5] = alpha * u[5] + beta * v[5]; w[6] = alpha * u[6] + beta * v[6]; w[7] = alpha * u[7] + beta * v[7]; return; } let m = n % 4; for i in 0..m { w[i] = alpha * u[i] + beta * v[i]; } for i in (m..n).step_by(4) { w[i + 0] = alpha * u[i + 0] + beta * v[i + 0]; w[i + 1] = alpha * u[i + 1] + beta * v[i + 1]; w[i + 2] = alpha * u[i + 2] + beta * v[i + 2]; w[i + 3] = alpha * u[i + 3] + beta * v[i + 3]; } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #[cfg(test)] mod tests { use super::add_vectors_native; use russell_chk::*; #[test] fn add_vectors_native_sizes_works() { const NOISE: f64 = 1234.567; for size in 0..13 { let mut u = vec![0.0; size]; let mut v = vec![0.0; size]; let mut w = vec![NOISE; size]; let mut correct = vec![0.0; size]; for i in 0..size { u[i] = i as f64; v[i] = i as f64; correct[i] = i as f64; } add_vectors_native(&mut w, 0.5, &u, 0.5, &v); assert_vec_approx_eq!(w, correct, 1e-15); } } #[test] fn
() { const NOISE: f64 = 1234.567; #[rustfmt::skip] let u = [ 1.0, 2.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, ]; #[rustfmt::skip] let v = [ 0.5, 1.0, 0.5, 1.0, 1.5, 2.0, 0.5, 1.0, 1.5, 2.0, 0.5, 1.0, 1.5, 2.0, 0.5, 1.0, 1.5, 2.0, ]; let mut w = vec![NOISE; u.len()]; add_vectors_native(&mut w, 1.0, &u, -4.0, &v); #[rustfmt::skip] let correct = &[ -1.0, -2.0, -1.0, -2.0, -3.0, -4.0, -1.0, -2.0, -3.0, -4.0, -1.0, -2.0, -3.0, -4.0, -1.0, -2.0, -3.0, -4.0, ]; assert_vec_approx_eq!(w, correct, 1e-15); } }
add_vectors_native_works
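// --- Usage sketch, not part of the original file above ---
// A minimal example of the operation documented above, w := alpha * u + beta * v.
// All three slices must have the same length; the function itself does not check.
// The function name of this extra test is an editor-chosen placeholder.
#[test]
fn add_vectors_native_usage_sketch() {
    let u = [1.0, 2.0, 3.0];
    let v = [10.0, 20.0, 30.0];
    let mut w = vec![0.0; 3];
    add_vectors_native(&mut w, 2.0, &u, 0.5, &v);
    // 2.0 * u[i] + 0.5 * v[i] for each component
    assert_eq!(w, [7.0, 14.0, 21.0]);
}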
data.js
export const lettersList = "Q,W,E,R,T,Y,U,I,O,P,A,S,D,F,G,H,J,K,L,ENTER,Z,X,C,V,B,N,M,BACKSPACE"; export const GREEN = 'bg-emerald-700';
export const LIGHTGREY = 'bg-slate-500'; export const GREY_DMODE = 'bg-letter-dark';
export const YELLOW = 'bg-amber-400'; export const DARKGREY = 'bg-slate-700';
opencensus_test.go
// Copyright 2019, OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ocmetrics import ( "bytes" "context" "encoding/json" "fmt" "io" "net" "strings" "sync" "testing" "time" commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1" metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" "github.com/golang/protobuf/proto" "github.com/stretchr/testify/require" "google.golang.org/grpc" "github.com/open-telemetry/opentelemetry-collector/consumer" "github.com/open-telemetry/opentelemetry-collector/consumer/consumerdata" "github.com/open-telemetry/opentelemetry-collector/internal" "github.com/open-telemetry/opentelemetry-collector/observability" "github.com/open-telemetry/opentelemetry-collector/testutils" ) // TODO: add E2E tests once ocagent implements metric service client. // Issue #43. Export should support node multiplexing. // The goal is to ensure that Receiver can always support // a passthrough mode where it initiates Export normally by firstly // receiving the initiator node. However ti should still be able to // accept nodes from downstream sources, but if a node isn't specified in // an exportMetrics request, assume it is from the last received and non-nil node. func TestExportMultiplexing(t *testing.T) { metricSink := newMetricAppender() _, port, doneFn := ocReceiverOnGRPCServer(t, metricSink) defer doneFn() metricsClient, metricsClientDoneFn, err := makeMetricsServiceClient(port) require.NoError(t, err, "Failed to create the gRPC MetricsService_ExportClient: %v", err) defer metricsClientDoneFn() // Step 1) The initiation. initiatingNode := &commonpb.Node{ Identifier: &commonpb.ProcessIdentifier{ Pid: 1, HostName: "multiplexer", }, LibraryInfo: &commonpb.LibraryInfo{Language: commonpb.LibraryInfo_JAVA}, } err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: initiatingNode}) require.NoError(t, err, "Failed to send the initiating message: %v", err) // Step 1a) Send some metrics without a node, they should be registered as coming from the initiating node. mLi := []*metricspb.Metric{makeMetric(1)} err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: nil, Metrics: mLi}) require.NoError(t, err, "Failed to send the proxied message from app1: %v", err) // Step 2) Send a "proxied" metrics message from app1 with "node1" node1 := &commonpb.Node{ Identifier: &commonpb.ProcessIdentifier{Pid: 9489, HostName: "nodejs-host"}, LibraryInfo: &commonpb.LibraryInfo{Language: commonpb.LibraryInfo_NODE_JS}, } mL1 := []*metricspb.Metric{makeMetric(2)} err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: node1, Metrics: mL1}) require.NoError(t, err, "Failed to send the proxied message from app1: %v", err) // Step 3) Send a metrics message without a node but with metrics: this // should be registered as belonging to the last used node i.e. "node1". 
mLn1 := []*metricspb.Metric{makeMetric(3)} err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: nil, Metrics: mLn1}) require.NoError(t, err, "Failed to send the proxied message without a node: %v", err) // Step 4) Send a metrics message from a differently proxied node "node2" from app2 node2 := &commonpb.Node{ Identifier: &commonpb.ProcessIdentifier{Pid: 7752, HostName: "golang-host"}, LibraryInfo: &commonpb.LibraryInfo{Language: commonpb.LibraryInfo_GO_LANG}, } mL2 := []*metricspb.Metric{makeMetric(4)} err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: node2, Metrics: mL2}) require.NoError(t, err, "Failed to send the proxied message from app2: %v", err) // Step 5a) Send a metrics message without a node but with metrics: this // should be registered as belonging to the last used node i.e. "node2". mLn2a := []*metricspb.Metric{makeMetric(5)} err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: nil, Metrics: mLn2a}) require.NoError(t, err, "Failed to send the proxied message without a node: %v", err) // Step 5b) mLn2b := []*metricspb.Metric{makeMetric(6)} err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: nil, Metrics: mLn2b}) require.NoError(t, err, "Failed to send the proxied message without a node: %v", err) // Give the process sometime to send data over the wire and perform batching <-time.After(150 * time.Millisecond) // Examination time! resultsMapping := make(map[string][]*metricspb.Metric) metricSink.forEachEntry(func(node *commonpb.Node, metrics []*metricspb.Metric) { resultsMapping[nodeToKey(node)] = metrics }) // First things first, we expect exactly 3 unique keys // 1. Initiating Node // 2. Node 1 // 3. Node 2 if g, w := len(resultsMapping), 3; g != w { t.Errorf("Got %d keys in the results map; Wanted exactly %d\n\nResultsMapping: %+v\n", g, w, resultsMapping) } // Want metric counts wantMetricCounts := map[string]int{ nodeToKey(initiatingNode): 1, nodeToKey(node1): 2, nodeToKey(node2): 3, } for key, wantMetricCounts := range wantMetricCounts { gotMetricCounts := len(resultsMapping[key]) if gotMetricCounts != wantMetricCounts { t.Errorf("Key=%q gotMetricCounts %d wantMetricCounts %d", key, gotMetricCounts, wantMetricCounts) } } // Now ensure that the exported metrics match up exactly with // the nodes and the last seen node expectation/behavior. // (or at least their serialized equivalents match up) wantContents := map[string][]*metricspb.Metric{ nodeToKey(initiatingNode): mLi, nodeToKey(node1): append(mL1, mLn1...), nodeToKey(node2): append(mL2, append(mLn2a, mLn2b...)...), } gotBlob, _ := json.Marshal(resultsMapping) wantBlob, _ := json.Marshal(wantContents) if !bytes.Equal(gotBlob, wantBlob) { t.Errorf("Unequal serialization results\nGot:\n\t%s\nWant:\n\t%s\n", gotBlob, wantBlob) } } // The first message without a Node MUST be rejected and teardown the connection. 
// See https://github.com/census-instrumentation/opencensus-service/issues/53 func TestExportProtocolViolations_nodelessFirstMessage(t *testing.T) { metricSink := newMetricAppender() _, port, doneFn := ocReceiverOnGRPCServer(t, metricSink) defer doneFn() metricsClient, metricsClientDoneFn, err := makeMetricsServiceClient(port) require.NoError(t, err, "Failed to create the gRPC MetricsService_ExportClient: %v", err) defer metricsClientDoneFn() // Send a Nodeless first message err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: nil}) require.NoError(t, err, "Unexpectedly failed to send the first message: %v", err) longDuration := 2 * time.Second testDone := make(chan bool, 1) var wg sync.WaitGroup wg.Add(1) go func() { // Our insurance policy to ensure that this test doesn't hang // forever and should quickly report if/when we regress. select { case <-testDone: t.Log("Test ended early enough") case <-time.After(longDuration): metricsClientDoneFn() t.Errorf("Test took too long (%s) and is likely still hanging so this is a regression", longDuration) } wg.Done() }() // Now the response should return an error and should have been torn down // regardless of the number of times after invocation below, or any attempt // to send the proper/corrective data should be rejected. for i := 0; i < 10; i++ { recv, err := metricsClient.Recv() if recv != nil { t.Errorf("Iteration #%d: Unexpectedly got back a response: %#v", i, recv) } if err == nil { t.Errorf("Iteration #%d: Unexpectedly got back a nil error", i) continue } wantSubStr := "protocol violation: Export's first message must have a Node" if g := err.Error(); !strings.Contains(g, wantSubStr) { t.Errorf("Iteration #%d: Got error:\n\t%s\nWant substring:\n\t%s\n", i, g, wantSubStr) } // The connection should be invalid at this point and // no attempt to send corrections should succeed. n1 := &commonpb.Node{ Identifier: &commonpb.ProcessIdentifier{Pid: 9489, HostName: "nodejs-host"}, LibraryInfo: &commonpb.LibraryInfo{Language: commonpb.LibraryInfo_NODE_JS}, } if err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: n1}); err == nil { t.Errorf("Iteration #%d: Unexpectedly succeeded in sending a message upstream. Connection must be in terminal state", i) } else if g, w := err, io.EOF; g != w { t.Errorf("Iteration #%d:\nGot error %q\nWant error %q", i, g, w) } } close(testDone) wg.Wait() } // If the first message is valid (has a non-nil Node) and has metrics, those // metrics should be received and NEVER discarded. // See https://github.com/census-instrumentation/opencensus-service/issues/51 func TestExportProtocolConformation_metricsInFirstMessage(t *testing.T) { t.Skipf("Currently disabled, this test is flaky on Windows. 
Enable this test when the following are fixed:\nIssue %s\n", "https://github.com/census-instrumentation/opencensus-service/issues/225", ) metricSink := newMetricAppender() _, port, doneFn := ocReceiverOnGRPCServer(t, metricSink) defer doneFn() metricsClient, metricsClientDoneFn, err := makeMetricsServiceClient(port) require.NoError(t, err, "Failed to create the gRPC MetricsService_ExportClient: %v", err) defer metricsClientDoneFn() mLi := []*metricspb.Metric{makeMetric(10), makeMetric(11)} ni := &commonpb.Node{ Identifier: &commonpb.ProcessIdentifier{Pid: 1}, LibraryInfo: &commonpb.LibraryInfo{Language: commonpb.LibraryInfo_JAVA}, } err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: ni, Metrics: mLi}) require.NoError(t, err, "Failed to send the first message: %v", err) // Give it time to be sent over the wire, then exported. <-time.After(100 * time.Millisecond) // Examination time! resultsMapping := make(map[string][]*metricspb.Metric) metricSink.forEachEntry(func(node *commonpb.Node, metrics []*metricspb.Metric) { resultsMapping[nodeToKey(node)] = metrics }) if g, w := len(resultsMapping), 1; g != w { t.Errorf("Results mapping: Got len(keys) %d Want %d", g, w) } // Check for the keys wantLengths := map[string]int{ nodeToKey(ni): 2, } for key, wantLength := range wantLengths { gotLength := len(resultsMapping[key]) if gotLength != wantLength { t.Errorf("Exported metrics:: Key: %s\nGot length %d\nWant length %d", key, gotLength, wantLength) } } // And finally ensure that the protos' serializations are equivalent to the expected wantContents := map[string][]*metricspb.Metric{ nodeToKey(ni): mLi, } gotBlob, _ := json.Marshal(resultsMapping) wantBlob, _ := json.Marshal(wantContents) if !bytes.Equal(gotBlob, wantBlob) { t.Errorf("Unequal serialization results\nGot:\n\t%s\nWant:\n\t%s\n", gotBlob, wantBlob) } } // Helper functions from here on below func makeMetricsServiceClient(port int) (agentmetricspb.MetricsService_ExportClient, func(), error) { addr := fmt.Sprintf(":%d", port) cc, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithBlock()) if err != nil { return nil, nil, err } svc := agentmetricspb.NewMetricsServiceClient(cc) metricsClient, err := svc.Export(context.Background()) if err != nil { _ = cc.Close() return nil, nil, err } doneFn := func() { _ = cc.Close() } return metricsClient, doneFn, nil } func nodeToKey(n *commonpb.Node) string { blob, _ := proto.Marshal(n) return string(blob) } // TODO: Move this to processortest. type metricAppender struct { sync.RWMutex metricsPerNode map[*commonpb.Node][]*metricspb.Metric } func newMetricAppender() *metricAppender { return &metricAppender{metricsPerNode: make(map[*commonpb.Node][]*metricspb.Metric)} } var _ consumer.MetricsConsumer = (*metricAppender)(nil) func (sa *metricAppender) ConsumeMetricsData(ctx context.Context, md consumerdata.MetricsData) error { sa.Lock() defer sa.Unlock() sa.metricsPerNode[md.Node] = append(sa.metricsPerNode[md.Node], md.Metrics...) return nil } func ocReceiverOnGRPCServer(t *testing.T, sr consumer.MetricsConsumer) (oci *Receiver, port int, done func())
{ ln, err := net.Listen("tcp", "localhost:") require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) doneFnList := []func(){func() { ln.Close() }} done = func() { for _, doneFn := range doneFnList { doneFn() } } _, port, err = testutils.HostPortFromAddr(ln.Addr()) if err != nil { done() t.Fatalf("Failed to parse host:port from listener address: %s error: %v", ln.Addr(), err) } oci, err = New(receiverTagValue, sr) require.NoError(t, err, "Failed to create the Receiver: %v", err) // Now run it as a gRPC server srv := observability.GRPCServerWithObservabilityEnabled() agentmetricspb.RegisterMetricsServiceServer(srv, oci) go func() { _ = srv.Serve(ln) }() return oci, port, done }
func (sa *metricAppender) forEachEntry(fn func(*commonpb.Node, []*metricspb.Metric)) { sa.RLock() defer sa.RUnlock() for node, metrics := range sa.metricsPerNode { fn(node, metrics) } } func makeMetric(val int) *metricspb.Metric { key := &metricspb.LabelKey{ Key: fmt.Sprintf("%s%d", "key", val), Description: "label key", } value := &metricspb.LabelValue{ Value: fmt.Sprintf("%s%d", "value", val), HasValue: true, } descriptor := &metricspb.MetricDescriptor{ Name: fmt.Sprintf("%s%d", "metric_descriptort_", val), Description: "metric descriptor", Unit: "1", Type: metricspb.MetricDescriptor_GAUGE_INT64, LabelKeys: []*metricspb.LabelKey{key}, } now := time.Now().UTC() point := &metricspb.Point{ Timestamp: internal.TimeToTimestamp(now.Add(20 * time.Second)), Value: &metricspb.Point_Int64Value{ Int64Value: int64(val), }, } ts := &metricspb.TimeSeries{ StartTimestamp: internal.TimeToTimestamp(now.Add(-10 * time.Second)), LabelValues: []*metricspb.LabelValue{value}, Points: []*metricspb.Point{point}, } return &metricspb.Metric{ MetricDescriptor: descriptor, Timeseries: []*metricspb.TimeSeries{ts}, } }
project-lead.service.ts
import { HttpClient, HttpHeaders } from '@angular/common/http';
import { Injectable } from '@angular/core';
import { Observable } from 'rxjs';
import { environment } from '../../../environments/environment';

const api = environment.BASE_URL + 'project_lead';

@Injectable({ providedIn: 'root' })
export class ProjectLeadService {

  constructor(private http: HttpClient) { }

  getprojectlead(): Observable<any[]> {
    return this.http.get<any[]>(api + `/getproject_lead`, { headers: new HttpHeaders({ 'Content-Type': 'application/json' }) });
  }

  getprojectnameforquotation(): Observable<any[]> {
    return this.http.get<any[]>(api + `/getprojectnameforquotation`, { headers: new HttpHeaders({ 'Content-Type': 'application/json' }) });
  }

  getprojectleadbyid(id: any): Observable<any[]> {
    return this.http.get<any[]>(api + `/getproject_leadbyid/` + id, { headers: new HttpHeaders({ 'Content-Type': 'application/json' }) });
  }

  getpldetailsbyid(id: any): Observable<any[]> {
    return this.http.get<any[]>(api + `/getpldetailsbyid/` + id, { headers: new HttpHeaders({ 'Content-Type': 'application/json' }) });
  }

  createprojectlead(body: any): Observable<any[]> {
    return this.http.post<any[]>(api + `/createproject_lead`, body, { headers: new HttpHeaders({ 'Content-Type': 'application/json' }) });
  }

  updateprojectlead(body: any): Observable<any[]> {
    return this.http.patch<any[]>(api + `/updateproject_lead`, body, { headers: new HttpHeaders({ 'Content-Type': 'application/json' }) });
  }

  deleteprojectlead(id: any): Observable<any[]> {
    return this.http.delete<any[]>(api + `/deleteproject_lead/` + id, { headers: new HttpHeaders({ 'Content-Type': 'application/json' }) });
  }
}
day6.py
from typing import List, Set from io_utils import read_input_file def day6_1(): input_list = read_input_file("day6.txt", input_type=str) answers = get_all_yes_answers_per_group(input_list) amount_yes_answers = 0 for answers_per_group in answers: amount_yes_answers += len(answers_per_group) return amount_yes_answers def get_all_yes_answers_per_group(input_list): answers: List[Set[str]] = [] i = 0 for line in input_list: if line == "\n": i += 1 else: if i == len(answers): answers.append(set()) line = line.strip() for char in line: answers[i].add(char) return answers def get_common_yes_answers_per_group(input_list):
answers: List[Set[str]] = [] i = 0 for line in input_list: if line == "\n": i += 1 else: line = line.strip() if i == len(answers): answers.append(set(line)) else: answers[i] = answers[i].intersection(line) return answers
def day6_2(): input_list = read_input_file("day6.txt", input_type=str) answers = get_common_yes_answers_per_group(input_list) amount_yes_answers = 0 for answers_per_group in answers: amount_yes_answers += len(answers_per_group) return amount_yes_answers
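A minimal usage sketch (illustrative only: the input lines below are invented, and in the real program read_input_file supplies them from day6.txt, with a blank line separating groups):

lines = ["ab\n", "ac\n", "\n", "b\n", "b\n", "b"]
anyone = get_all_yes_answers_per_group(lines)       # e.g. [{'a', 'b', 'c'}, {'b'}]
everyone = get_common_yes_answers_per_group(lines)  # e.g. [{'a'}, {'b'}]
print(sum(len(group) for group in anyone))    # 4, the part-1 style count
print(sum(len(group) for group in everyone))  # 2, the part-2 style count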
struct.go
package composite import ( "bytes" "context" "fmt" "net/url" "sort" "unicode" "github.com/ns1/jsonschema2go/internal/validator" "github.com/ns1/jsonschema2go/pkg/gen" ) // StructField contains information about how a struct's field should be rendered type StructField struct { Comment string Name string JSONName string Type gen.TypeInfo Tag string Required bool FieldValidators []validator.Validator } // Validators returns the validators for this field func (s StructField) Validators() []validator.Validator { return validator.Sorted(s.FieldValidators) } // StructPlan is an implementation of the interface Plan specific to structs type StructPlan struct { TypeInfo gen.TypeInfo ID *url.URL Comment string Fields []StructField SubRequired []StructField Traits []Trait } // Type returns the calculated type info for this struct func (s *StructPlan) Type() gen.TypeInfo { return s.TypeInfo } // Execute executes the provided struct plan and returns it rendered as a string func (s *StructPlan) Execute(imp *gen.Imports) (string, error) { var w bytes.Buffer err := tmpl.Execute(&w, &structPlanContext{s, imp}) return w.String(), err } // Deps returns all known required imported symbols for this plan func (s *StructPlan) Deps() (deps []gen.TypeInfo) { deps = append(deps, gen.TypeInfo{Name: "Sprintf", GoPath: "fmt"}) for _, f := range s.Fields { deps = append(deps, f.Type) for _, v := range f.FieldValidators { deps = append(deps, v.Deps...) } } for _, t := range s.Traits { if t, ok := t.(interface{ Deps() []gen.TypeInfo }); ok { deps = append(deps, t.Deps()...) } } return } //go:generate go run ../cmd/embedtmpl/embedtmpl.go composite struct.tmpl tmpl.gen.go // PlanObject returns a plan if the provided type is an object; otherwise it returns ErrContinue func PlanObject(ctx context.Context, helper gen.Helper, schema *gen.Schema) (gen.Plan, error) { jType, err := helper.DetectSimpleType(ctx, schema) if err != nil { return nil, err } if jType != gen.JSONObject { return nil, fmt.Errorf("not an object: %w", gen.ErrContinue) } tInfo, err := helper.TypeInfo(schema) if err != nil { return nil, err } // matched s := &StructPlan{TypeInfo: tInfo, ID: schema.ID} s.Comment = schema.Annotations.GetString("description") fields, err := deriveStructFields(ctx, helper, schema) if err != nil { return nil, err } s.Fields = fields return s, nil } func deriveStructFields( ctx context.Context, helper gen.Helper, schema *gen.Schema, ) (fields []StructField, _ error) { required := make(map[string]bool, len(schema.Required)) for _, k := range schema.Required { required[k] = true } var properties []string for k := range schema.Properties { properties = append(properties, k) } sort.Strings(properties) for _, name := range properties { fieldSchema, err := schema.Properties[name].Resolve(ctx, schema, helper) if err != nil { return nil, err } if fieldSchema.Config.RawMessage { fields = append( fields, StructField{ Comment: fieldSchema.Annotations.GetString("description"), Name: helper.JSONPropertyExported(name), JSONName: name, Type: gen.TypeInfo{GoPath: "encoding/json", Name: "RawMessage"}, Tag: fmt.Sprintf("`"+`json:"%s"`+"`", name), Required: required[name], FieldValidators: validator.Validators(fieldSchema), }, ) continue } fType, _ := helper.TypeInfo(fieldSchema) if fType.Unknown() && len(fieldSchema.OneOf) == 2 { oneOfA, err := fieldSchema.OneOf[0].Resolve(ctx, fieldSchema, helper) if err != nil { return nil, err } typeA, err := helper.DetectSimpleType(ctx, oneOfA) if err != nil { return nil, err } oneOfB, err := 
fieldSchema.OneOf[1].Resolve(ctx, fieldSchema, helper) if err != nil { return nil, err } typeB, err := helper.DetectSimpleType(ctx, oneOfB) if err != nil { return nil, err } if typeA == gen.JSONNull || typeB == gen.JSONNull { // this is a nillable field valueSchema := oneOfA if typeA == gen.JSONNull { valueSchema = oneOfB } if fType, err = helper.TypeInfo(valueSchema); err != nil { return nil, err } fType.Pointer = true } } fJType, err := helper.DetectSimpleType(ctx, fieldSchema) if err != nil && !helper.ErrSimpleTypeUnknown(err) { return nil, err } if fJType == gen.JSONObject && len(fieldSchema.Properties) == 0 && fieldSchema.AdditionalProperties != nil && fieldSchema.AdditionalProperties.Bool != nil && *fieldSchema.AdditionalProperties.Bool { fType = gen.TypeInfo{Name: "map[string]interface{}"} } if fJType == gen.JSONUnknown && fType.Unknown() { fType = gen.TypeInfo{Name: "interface{}"} } if !fType.BuiltIn() { if err := helper.Dep(ctx, fieldSchema); err != nil { return nil, err } } var tag string switch { case name == "": // embedded fields don't get tags case fJType == gen.JSONArray || fieldSchema.Config.NoOmitEmpty: tag = fmt.Sprintf("`"+`json:"%s"`+"`", name) default: tag = fmt.Sprintf("`"+`json:"%s,omitempty"`+"`", name) } if fType.BuiltIn() { switch fType.Name { case "string", "int64", "bool", "float64": fType.Pointer = true } } // not a reference type if !fType.BuiltIn() && fJType == gen.JSONObject && !fieldSchema.AdditionalProperties.Present() { fType.Pointer = true } fieldName, ok := schema.Config.FieldAliases[name] if !ok { fieldName = helper.JSONPropertyExported(name) } fields = append( fields, StructField{ Comment: fieldSchema.Annotations.GetString("description"), Name: fieldName, JSONName: name, Type: fType, Tag: tag, Required: required[name], FieldValidators: validator.Validators(fieldSchema), }, ) } return } type structPlanContext struct { *StructPlan *gen.Imports } func (s *structPlanContext) Comment() string { return gen.NormalizeComment(s.StructPlan.Comment) } func (s *structPlanContext) ValidateInitialize() bool { for _, f := range s.Fields() { for _, v := range f.FieldValidators { if v.VarExpr != nil { return true } } } return false } type enrichedStructField struct { StructField StructPlan *StructPlan Imports *gen.Imports } func (f *enrichedStructField) Comment() string { return gen.NormalizeComment(f.StructField.Comment) } func (s *structPlanContext) Required() []enrichedStructField { var fields []StructField for _, s := range s.StructPlan.Fields { if s.Required { fields = append(fields, s) } } fields = append(fields, s.SubRequired...) return s.enrich(fields) } func (s *structPlanContext) enrich(fields []StructField) (enriched []enrichedStructField) { for _, f := range fields { enriched = append(enriched, enrichedStructField{ StructField: f, StructPlan: s.StructPlan, Imports: s.Imports, }) } return } func (s *structPlanContext) Fields() []enrichedStructField { return s.enrich(s.StructPlan.Fields) } func (f *enrichedStructField) DerefExpr() string { valPath := "" if f.Type.ValPath != "" { valPath = "." 
+ f.Type.ValPath } v := fmt.Sprintf("m.%s%s", f.Name, valPath) if f.Type.Pointer { v = "*" + v } return v } func (f *enrichedStructField) TestSetExpr(pos bool) (string, error) { op := "!=" if !pos { op = "==" } return fmt.Sprintf("m.%s %s nil", f.Name, op), nil } func (f *enrichedStructField) NameSpace() string { name := fmt.Sprintf("%s%s", f.StructPlan.Type().Name, f.Name) if len(name) > 0 { runes := []rune(name) runes[0] = unicode.ToLower(runes[0]) name = string(runes) } return name } func (f *enrichedStructField) FieldDecl() string { typ := f.Imports.QualName(f.Type) if f.Type.Pointer { typ = "*" + typ } return f.Name + " " + typ + " " + f.Tag } func (f *enrichedStructField) InnerFieldDecl() string { typName := f.Imports.QualName(f.Type) return fmt.Sprintf("%s %s %s", f.Name, typName, f.Tag) } func (f *enrichedStructField) Embedded() bool { return f.Name == "" } func (f *enrichedStructField) FieldRef() string { if f.Name != "" { return f.Name } return f.Type.Name // embedded } func (f *enrichedStructField) InnerFieldLiteral() string { fieldRef := f.Name if fieldRef == "" { // embedded fieldRef = f.Type.Name } return fmt.Sprintf("%s: m.%s,", fieldRef, fieldRef) } var fieldAssignmentTmpl = validator.TemplateStr(`if m.{{ .Name }}.Set { inner.{{ .Name }} = &m.{{ .Name }}{{ .ValPath }} }`) func (f *enrichedStructField) InnerFieldAssignment() (string, error) { valPath := "" if f.Type.ValPath != "" { valPath = "." + f.Type.ValPath } var w bytes.Buffer err := fieldAssignmentTmpl.Execute(&w, struct { Name string ValPath string }{ Name: f.Name, ValPath: valPath, }) return w.String(), err } func loadSchemaList( ctx context.Context, helper gen.Helper, parent *gen.Schema, schemas []*gen.RefOrSchema, ) (gen.JSONType, []*gen.Schema, error)
{ var ( resolved []*gen.Schema foundType gen.JSONType ) for _, s := range schemas { r, err := s.Resolve(ctx, parent, helper) if err != nil { return gen.JSONUnknown, nil, err } resolved = append(resolved, r) t, err := helper.DetectSimpleType(ctx, r) if err != nil { return gen.JSONUnknown, nil, err } if t == gen.JSONUnknown { continue } if foundType == gen.JSONUnknown { foundType = t continue } } return foundType, resolved, nil }
DataManagement.py
""" Module with the parent abstract class DataManagement. \n Carmine Schipani, 2021 """ from abc import ABC, abstractmethod from OpenSeesPyAssistant.ErrorHandling import * import numpy as np class
(ABC): """ Abstract parent class for data management. Using the associated MATLAB class \n LOAD_CLASS.m \n for the postprocessing in MATLAB, allowing for simpler and more reliable data management because the parameters from the OpenSeesPy analysis are imported automatically. """ def SaveData(self, f): """ Function that lists in the command window and saves in a opened file text "f" the data from the "self" class that calls it. Example: call this function after this line: \n with open(FileName, 'w') as f: @param f (io.TextIOWrapper): Opened file to write into @exception WrongDimension: The number of lists in the list self.data needs to be 2 """ if len(self.data[0]) != 2: raise WrongDimension() delimiter = "##############################" # 30 times # col_delimiter = "\t" # tab for data_line in self.data: f.write('\n') for col in data_line: if type(col) == np.ndarray: tmp_str = np.array_str(col, max_line_width = np.inf) else: tmp_str = str(col) f.write(tmp_str) f.write(col_delimiter) f.write('\n') f.write('NEW INFO SECTION DELIMITER \t') f.write(delimiter) @abstractmethod def ShowInfo(self): """ Abstract method that shows the data stored in the class in the command window. In some cases, it's possible to plot some information (for example the curve of the material model). """ pass @abstractmethod def ReInit(self): """ Abstract method that computes the value of the parameters with respect of the arguments. \n Use after changing the value of argument inside the class (to update the values accordingly). \n This function can be very useful in combination with the function "deepcopy()" from the module "copy". \n Be careful that the parameter self.Initialized is also copied, thus it is safer to copy the class before the method that calls the actual OpenSees commands (and initialise the object). """ pass @abstractmethod def UpdateStoredData(self): """ Abstract method used to define and update the self.data member variable. \n This member variable (self.data) is a list of lists with 2 entries (info_name and info_value) and for each list is stored a different member variable of the class. \n Useful to debug the model, export data, copy object. """ pass
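A minimal, hypothetical subclass sketch (the class name, member variables, and file name below are invented for illustration and are not part of the library), showing the [info_name, info_value] layout of self.data that SaveData expects and the "with open(FileName, 'w') as f:" pattern mentioned in its docstring:

class MemberExample(DataManagement):
    def __init__(self, name, E, A):
        self.name = name
        self.E = E
        self.A = A
        self.ReInit()

    def ReInit(self):
        # Recompute derived parameters after arguments change (none here) and refresh self.data.
        self.UpdateStoredData()

    def ShowInfo(self):
        print("Member '{}': E = {}, A = {}".format(self.name, self.E, self.A))

    def UpdateStoredData(self):
        # Each entry is an [info_name, info_value] pair, the two-column layout SaveData checks for.
        self.data = [["INFO_TYPE", "MemberExample"],
                     ["name", self.name],
                     ["E", self.E],
                     ["A", self.A]]

member = MemberExample("beam_1", 200e3, 1.5e3)
with open("member_data.txt", 'w') as f:
    member.SaveData(f)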
config.go
package rpc import ( "bufio" "fmt" "os" "strings" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/client/input" "github.com/cosmos/cosmos-sdk/client/lcd" "github.com/cosmos/cosmos-sdk/crypto/keys" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/ethermint/app" "github.com/cosmos/ethermint/crypto/ethsecp256k1" "github.com/cosmos/ethermint/crypto/hd" "github.com/cosmos/ethermint/rpc/websockets" evmrest "github.com/cosmos/ethermint/x/evm/client/rest" "github.com/ethereum/go-ethereum/rpc" "github.com/spf13/viper" ) const ( flagUnlockKey = "unlock-key" flagWebsocket = "wsport" ) // RegisterRoutes creates a new server and registers the `/rpc` endpoint. // Rpc calls are enabled based on their associated module (eg. "eth"). func RegisterRoutes
(rs *lcd.RestServer) { server := rpc.NewServer() accountName := viper.GetString(flagUnlockKey) accountNames := strings.Split(accountName, ",") var privkeys []ethsecp256k1.PrivKey if len(accountName) > 0 { var err error inBuf := bufio.NewReader(os.Stdin) keyringBackend := viper.GetString(flags.FlagKeyringBackend) passphrase := "" switch keyringBackend { case keys.BackendOS: break case keys.BackendFile: passphrase, err = input.GetPassword( "Enter password to unlock key for RPC API: ", inBuf) if err != nil { panic(err) } } privkeys, err = unlockKeyFromNameAndPassphrase(accountNames, passphrase) if err != nil { panic(err) } } rpcapi := viper.GetString(flagRPCAPI) rpcapi = strings.ReplaceAll(rpcapi, " ", "") rpcapiArr := strings.Split(rpcapi, ",") apis := GetAPIs(rs.CliCtx, rpcapiArr, privkeys...) // Register all the APIs exposed by the namespace services // TODO: handle allowlist and private APIs for _, api := range apis { if err := server.RegisterName(api.Namespace, api.Service); err != nil { panic(err) } } // Web3 RPC API route rs.Mux.HandleFunc("/", server.ServeHTTP).Methods("POST", "OPTIONS") // Register all other Cosmos routes client.RegisterRoutes(rs.CliCtx, rs.Mux) evmrest.RegisterRoutes(rs.CliCtx, rs.Mux) app.ModuleBasics.RegisterRESTRoutes(rs.CliCtx, rs.Mux) // start websockets server websocketAddr := viper.GetString(flagWebsocket) ws := websockets.NewServer(rs.CliCtx, websocketAddr) ws.Start() } func unlockKeyFromNameAndPassphrase(accountNames []string, passphrase string) ([]ethsecp256k1.PrivKey, error) { keybase, err := keys.NewKeyring( sdk.KeyringServiceName(), viper.GetString(flags.FlagKeyringBackend), viper.GetString(flags.FlagHome), os.Stdin, hd.EthSecp256k1Options()..., ) if err != nil { return []ethsecp256k1.PrivKey{}, err } // try the for loop with array []string accountNames // run through the bottom code inside the for loop keys := make([]ethsecp256k1.PrivKey, len(accountNames)) for i, acc := range accountNames { // With keyring keybase, password is not required as it is pulled from the OS prompt privKey, err := keybase.ExportPrivateKeyObject(acc, passphrase) if err != nil { return []ethsecp256k1.PrivKey{}, err } var ok bool keys[i], ok = privKey.(ethsecp256k1.PrivKey) if !ok { panic(fmt.Sprintf("invalid private key type %T at index %d", privKey, i)) } } return keys, nil }
check_code_block_syntax.rs
use errors::{emitter::Emitter, Applicability, Diagnostic, Handler}; use rustc_data_structures::sync::{Lock, Lrc}; use rustc_parse::lexer::StringReader as Lexer; use rustc_span::source_map::{FilePathMapping, SourceMap}; use rustc_span::{FileName, InnerSpan}; use syntax::sess::ParseSess; use syntax::token; use crate::clean; use crate::core::DocContext; use crate::fold::DocFolder; use crate::html::markdown::{self, RustCodeBlock}; use crate::passes::Pass; pub const CHECK_CODE_BLOCK_SYNTAX: Pass = Pass { name: "check-code-block-syntax", run: check_code_block_syntax, description: "validates syntax inside Rust code blocks", }; pub fn check_code_block_syntax(krate: clean::Crate, cx: &DocContext<'_>) -> clean::Crate { SyntaxChecker { cx }.fold_crate(krate) } struct SyntaxChecker<'a, 'tcx> { cx: &'a DocContext<'tcx>, } impl<'a, 'tcx> SyntaxChecker<'a, 'tcx> { fn check_rust_syntax(&self, item: &clean::Item, dox: &str, code_block: RustCodeBlock) { let buffered_messages = Lrc::new(Lock::new(vec![])); let emitter = BufferEmitter { messages: Lrc::clone(&buffered_messages) }; let cm = Lrc::new(SourceMap::new(FilePathMapping::empty())); let handler = Handler::with_emitter(false, None, Box::new(emitter)); let sess = ParseSess::with_span_handler(handler, cm); let source_file = sess.source_map().new_source_file( FileName::Custom(String::from("doctest")), dox[code_block.code].to_owned(), ); let validation_status = { let mut has_syntax_errors = false; let mut only_whitespace = true; // even if there is a syntax error, we need to run the lexer over the whole file let mut lexer = Lexer::new(&sess, source_file, None); loop { match lexer.next_token().kind { token::Eof => break, token::Whitespace => (), token::Unknown(..) => has_syntax_errors = true, _ => only_whitespace = false, } } if has_syntax_errors { Some(CodeBlockInvalid::SyntaxError) } else if only_whitespace { Some(CodeBlockInvalid::Empty) } else { None } }; if let Some(code_block_invalid) = validation_status { let mut diag = if let Some(sp) = super::source_span_for_markdown_range(self.cx, &dox, &code_block.range, &item.attrs) { let warning_message = match code_block_invalid { CodeBlockInvalid::SyntaxError => "could not parse code block as Rust code", CodeBlockInvalid::Empty => "Rust code block is empty", }; let mut diag = self.cx.sess().struct_span_warn(sp, warning_message); if code_block.syntax.is_none() && code_block.is_fenced { let sp = sp.from_inner(InnerSpan::new(0, 3)); diag.span_suggestion( sp, "mark blocks that do not contain Rust code as text", String::from("```text"), Applicability::MachineApplicable, ); } diag } else { // We couldn't calculate the span of the markdown block that had the error, so our // diagnostics are going to be a bit lacking. let mut diag = self.cx.sess().struct_span_warn( super::span_of_attrs(&item.attrs).unwrap_or(item.source.span()), "doc comment contains an invalid Rust code block", ); if code_block.syntax.is_none() && code_block.is_fenced { diag.help("mark blocks that do not contain Rust code as text: ```text"); } diag }; // FIXME(#67563): Provide more context for these errors by displaying the spans inline. for message in buffered_messages.borrow().iter() { diag.note(&message); } diag.emit(); } } } impl<'a, 'tcx> DocFolder for SyntaxChecker<'a, 'tcx> { fn
fold_item(&mut self, item: clean::Item) -> Option<clean::Item> { if let Some(dox) = &item.attrs.collapsed_doc_value() { for code_block in markdown::rust_code_blocks(&dox) { self.check_rust_syntax(&item, &dox, code_block); } } self.fold_item_recur(item) } } struct BufferEmitter { messages: Lrc<Lock<Vec<String>>>, } impl Emitter for BufferEmitter { fn emit_diagnostic(&mut self, diag: &Diagnostic) { self.messages.borrow_mut().push(format!("error from rustc: {}", diag.message[0].0)); } fn source_map(&self) -> Option<&Lrc<SourceMap>> { None } } enum CodeBlockInvalid { SyntaxError, Empty, }
test_independentset.py
#!/usr/bin/env python3 import unittest import networkit as nk class TestIndependentSet(unittest.TestCase): def testLubyAlgorithm(self):
if __name__ == "__main__": unittest.main()
G = nk.Graph(4, False, False) G.addEdge(0, 1) G.addEdge(0, 2) G.addEdge(1, 2) G.addEdge(2, 3) luby = nk.independentset.Luby() res = luby.run(G) count = sum(res) # The are several valid outcomes, with either one or two nodes being independent. self.assertGreaterEqual(count, 1) self.assertLessEqual(count, 2) G.addEdge(0, 3) G.addEdge(1, 3) res = luby.run(G) count = sum(res) # Only a single node can be independent since the graph is fully connected. self.assertEqual(count, 1)
blockly.d.ts
/** * @license * Copyright 2019 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @fileoverview Type definitions for Blockly. * @author [email protected] (Sam El-Husseini) */ export = Blockly; declare module Blockly { interface BlocklyOptions { toolbox?: HTMLElement | string; readOnly?: boolean; trashcan?: boolean; maxTrashcanContents?: number; collapse?: boolean; comments?: boolean; disable?: boolean; sounds?: boolean; rtl?: boolean; horizontalLayout?: boolean; toolboxPosition?: string; css?: boolean; oneBasedIndex?: boolean; media?: string; theme?: Blockly.BlocklyTheme; move?: { scrollbars?: boolean; drag?: boolean; wheel?: boolean; }; grid?: { spacing?: number; colour?: string; length?: number; snap?: boolean; }; zoom?: { controls?: boolean; wheel?: boolean; startScale?: number; maxScale?: number; minScale?: number; scaleSpeed?: number; }; } interface BlocklyTheme { defaultBlockStyles?: {[blocks: string]: Blockly.Theme.BlockStyle;}; categoryStyles?: {[category: string]: Blockly.Theme.CategoryStyle;}; } interface Metrics { absoluteLeft: number; absoluteTop: number; contentHeight: number; contentLeft: number; contentTop: number; contentWidth: number; viewHeight: number; viewLeft: number; viewTop: number; viewWidth: number; } /** * Set the Blockly locale. * Note: this method is only available in the npm release of Blockly. * @param {!Object} msg An object of Blockly message strings in the desired * language. */ function setLocale(msg: {[key: string]: string;}): void; } declare module Blockly { class Block extends Block__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Block__Class { /** * Class for one block. * Not normally called directly, workspace.newBlock() is preferred. * @param {!Blockly.Workspace} workspace The block's workspace. * @param {?string} prototypeName Name of the language object containing * type-specific functions for this block. * @param {string=} opt_id Optional ID. Use this ID if provided, otherwise * create a new ID. * @constructor * @throws When block is not valid or block name is not allowed. */ constructor(workspace: Blockly.Workspace, prototypeName: string, opt_id?: string); /** @type {string} */ id: string; /** @type {Blockly.Connection} */ outputConnection: Blockly.Connection; /** @type {Blockly.Connection} */ nextConnection: Blockly.Connection; /** @type {Blockly.Connection} */ previousConnection: Blockly.Connection; /** @type {!Array.<!Blockly.Input>} */ inputList: Blockly.Input[]; /** @type {boolean|undefined} */ inputsInline: boolean|any /*undefined*/; /** @type {string|!Function} */ tooltip: string|Function; /** @type {boolean} */ contextMenu: boolean; /** * @type {Blockly.Block} * @protected */ parentBlock_: Blockly.Block; /** * @type {!Array.<!Blockly.Block>} * @protected */ childBlocks_: Blockly.Block[]; /** * @type {boolean} * @protected */ collapsed_: boolean; /** * A string representing the comment attached to this block. * @type {string|Blockly.Comment} * @deprecated August 2019. 
Use getCommentText instead. */ comment: string|Blockly.Comment; /** * A model of the comment attached to this block. * @type {!Blockly.Block.CommentModel} * @package */ commentModel: Blockly.Block.CommentModel; /** @type {!Blockly.Workspace} */ workspace: Blockly.Workspace; /** @type {boolean} */ isInFlyout: boolean; /** @type {boolean} */ isInMutator: boolean; /** @type {boolean} */ RTL: boolean; /** * True if this block is an insertion marker. * @type {boolean} * @protected */ isInsertionMarker_: boolean; /** * Name of the type of hat. * @type {string|undefined} */ hat: string|any /*undefined*/; /** @type {string} */ type: string; /** @type {boolean|undefined} */ inputsInlineDefault: boolean|any /*undefined*/; /** * Optional text data that round-trips between blocks and XML. * Has no effect. May be used by 3rd parties for meta information. * @type {?string} */ data: string; /** * Has this block been disposed of? * @type {boolean} * @package */ disposed: boolean; /** * An optional serialization method for defining how to serialize the * mutation state. This must be coupled with defining `domToMutation`. * @type {?function(...):!Element} */ mutationToDom: any /*missing*/; /** * An optional deserialization method for defining how to deserialize the * mutation state. This must be coupled with defining `mutationToDom`. * @type {?function(!Element)} */ domToMutation: { (_0: Element): any /*missing*/ }; /** * An optional property for suppressing adding STATEMENT_PREFIX and * STATEMENT_SUFFIX to generated code. * @type {?boolean} */ suppressPrefixSuffix: boolean; /** * Dispose of this block. * @param {boolean} healStack If true, then try to heal any gap by connecting * the next statement with the previous statement. Otherwise, dispose of * all children of this block. */ dispose(healStack: boolean): void; /** * Call initModel on all fields on the block. * May be called more than once. * Either initModel or initSvg must be called after creating a block and before * the first interaction with it. Interactions include UI actions * (e.g. clicking and dragging) and firing events (e.g. create, delete, and * change). * @public */ initModel(): void; /** * Unplug this block from its superior block. If this block is a statement, * optionally reconnect the block underneath with the block on top. * @param {boolean=} opt_healStack Disconnect child statement and reconnect * stack. Defaults to false. */ unplug(opt_healStack?: boolean): void; /** * Walks down a stack of blocks and finds the last next connection on the stack. * @return {Blockly.Connection} The last next connection on the stack, or null. * @package */ lastConnectionInStack(): Blockly.Connection; /** * Bump unconnected blocks out of alignment. Two blocks which aren't actually * connected should not coincidentally line up on screen. */ bumpNeighbours(): void; /** * Return the parent block or null if this block is at the top level. * @return {Blockly.Block} The block that holds the current block. */ getParent(): Blockly.Block; /** * Return the input that connects to the specified block. * @param {!Blockly.Block} block A block connected to an input on this block. * @return {Blockly.Input} The input that connects to the specified block. */ getInputWithBlock(block: Blockly.Block): Blockly.Input; /** * Return the parent block that surrounds the current block, or null if this * block has no surrounding block. A parent block might just be the previous * statement, whereas the surrounding block is an if statement, while loop, etc. 
* @return {Blockly.Block} The block that surrounds the current block. */ getSurroundParent(): Blockly.Block; /** * Return the next statement block directly connected to this block. * @return {Blockly.Block} The next statement block or null. */ getNextBlock(): Blockly.Block; /** * Return the previous statement block directly connected to this block. * @return {Blockly.Block} The previous statement block or null. */ getPreviousBlock(): Blockly.Block; /** * Return the connection on the first statement input on this block, or null if * there are none. * @return {Blockly.Connection} The first statement connection or null. * @package */ getFirstStatementConnection(): Blockly.Connection; /** * Return the top-most block in this block's tree. * This will return itself if this block is at the top level. * @return {!Blockly.Block} The root block. */ getRootBlock(): Blockly.Block; /** * Find all the blocks that are directly nested inside this one. * Includes value and statement inputs, as well as any following statement. * Excludes any connection on an output tab or any preceding statement. * Blocks are optionally sorted by position; top to bottom. * @param {boolean} ordered Sort the list if true. * @return {!Array.<!Blockly.Block>} Array of blocks. */ getChildren(ordered: boolean): Blockly.Block[]; /** * Set parent of this block to be a new block or null. * @param {Blockly.Block} newParent New parent block. */ setParent(newParent: Blockly.Block): void; /** * Find all the blocks that are directly or indirectly nested inside this one. * Includes this block in the list. * Includes value and statement inputs, as well as any following statements. * Excludes any connection on an output tab or any preceding statements. * Blocks are optionally sorted by position; top to bottom. * @param {boolean} ordered Sort the list if true. * @return {!Array.<!Blockly.Block>} Flattened array of blocks. */ getDescendants(ordered: boolean): Blockly.Block[]; /** * Get whether this block is deletable or not. * @return {boolean} True if deletable. */ isDeletable(): boolean; /** * Set whether this block is deletable or not. * @param {boolean} deletable True if deletable. */ setDeletable(deletable: boolean): void; /** * Get whether this block is movable or not. * @return {boolean} True if movable. */ isMovable(): boolean; /** * Set whether this block is movable or not. * @param {boolean} movable True if movable. */ setMovable(movable: boolean): void; /** * Get whether is block is duplicatable or not. If duplicating this block and * descendants will put this block over the workspace's capacity this block is * not duplicatable. If duplicating this block and descendants will put any * type over their maxInstances this block is not duplicatable. * @return {boolean} True if duplicatable. */ isDuplicatable(): boolean; /** * Get whether this block is a shadow block or not. * @return {boolean} True if a shadow. */ isShadow(): boolean; /** * Set whether this block is a shadow block or not. * @param {boolean} shadow True if a shadow. */ setShadow(shadow: boolean): void; /** * Get whether this block is an insertion marker block or not. * @return {boolean} True if an insertion marker. * @package */ isInsertionMarker(): boolean; /** * Set whether this block is an insertion marker block or not. * Once set this cannot be unset. * @param {boolean} insertionMarker True if an insertion marker. * @package */ setInsertionMarker(insertionMarker: boolean): void; /** * Get whether this block is editable or not. * @return {boolean} True if editable. 
*/ isEditable(): boolean; /** * Set whether this block is editable or not. * @param {boolean} editable True if editable. */ setEditable(editable: boolean): void; /** * Find the connection on this block that corresponds to the given connection * on the other block. * Used to match connections between a block and its insertion marker. * @param {!Blockly.Block} otherBlock The other block to match against. * @param {!Blockly.Connection} conn The other connection to match. * @return {Blockly.Connection} The matching connection on this block, or null. * @package */ getMatchingConnection(otherBlock: Blockly.Block, conn: Blockly.Connection): Blockly.Connection; /** * Set the URL of this block's help page. * @param {string|Function} url URL string for block help, or function that * returns a URL. Null for no help. */ setHelpUrl(url: string|Function): void; /** * Change the tooltip text for a block. * @param {string|!Function} newTip Text for tooltip or a parent element to * link to for its tooltip. May be a function that returns a string. */ setTooltip(newTip: string|Function): void; /** * Get the colour of a block. * @return {string} #RRGGBB string. */ getColour(): string; /** * Get the secondary colour of a block. * @return {?string} #RRGGBB string. */ getColourSecondary(): string; /** * Get the tertiary colour of a block. * @return {?string} #RRGGBB string. */ getColourTertiary(): string; /** * Get the shadow colour of a block. * @return {?string} #RRGGBB string. */ getColourShadow(): string; /** * Get the border colour(s) of a block. * @return {{colourDark, colourLight, colourBorder}} An object containing * colour values for the border(s) of the block. If the block is using a * style the colourBorder will be defined and equal to the tertiary colour * of the style (#RRGGBB string). Otherwise the colourDark and colourLight * attributes will be defined (#RRGGBB strings). * @package */ getColourBorder(): { colourDark: any /*missing*/; colourLight: any /*missing*/; colourBorder: any /*missing*/ }; /** * Get the name of the block style. * @return {?string} Name of the block style. */ getStyleName(): string; /** * Get the HSV hue value of a block. Null if hue not set. * @return {?number} Hue value (0-360). */ getHue(): number; /** * Change the colour of a block. * @param {number|string} colour HSV hue value (0 to 360), #RRGGBB string, * or a message reference string pointing to one of those two values. */ setColour(colour: number|string): void; /** * Set the style and colour values of a block. * @param {string} blockStyleName Name of the block style * @throws {Error} if the block style does not exist. */ setStyle(blockStyleName: string): void; /** * Sets a callback function to use whenever the block's parent workspace * changes, replacing any prior onchange handler. This is usually only called * from the constructor, the block type initializer function, or an extension * initializer function. * @param {function(Blockly.Events.Abstract)} onchangeFn The callback to call * when the block's workspace changes. * @throws {Error} if onchangeFn is not falsey or a function. */ setOnChange(onchangeFn: { (_0: Blockly.Events.Abstract): any /*missing*/ }): void; /** * Returns the named field from a block. * @param {string} name The name of the field. * @return {Blockly.Field} Named field, or null if field does not exist. */ getField(name: string): Blockly.Field; /** * Return all variables referenced by this block. * @return {!Array.<string>} List of variable names. 
* @package */ getVars(): string[]; /** * Return all variables referenced by this block. * @return {!Array.<!Blockly.VariableModel>} List of variable models. * @package */ getVarModels(): Blockly.VariableModel[]; /** * Notification that a variable is renaming but keeping the same ID. If the * variable is in use on this block, rerender to show the new name. * @param {!Blockly.VariableModel} variable The variable being renamed. * @package */ updateVarName(variable: Blockly.VariableModel): void; /** * Notification that a variable is renaming. * If the ID matches one of this block's variables, rename it. * @param {string} oldId ID of variable to rename. * @param {string} newId ID of new variable. May be the same as oldId, but with * an updated name. */ renameVarById(oldId: string, newId: string): void; /** * Returns the language-neutral value from the field of a block. * @param {string} name The name of the field. * @return {*} Value from the field or null if field does not exist. */ getFieldValue(name: string): any; /** * Change the field value for a block (e.g. 'CHOOSE' or 'REMOVE'). * @param {string} newValue Value to be the new field. * @param {string} name The name of the field. */ setFieldValue(newValue: string, name: string): void; /** * Set whether this block can chain onto the bottom of another block. * @param {boolean} newBoolean True if there can be a previous statement. * @param {(string|Array.<string>|null)=} opt_check Statement type or * list of statement types. Null/undefined if any type could be connected. */ setPreviousStatement(newBoolean: boolean, opt_check?: string|string[]|any /*null*/): void; /** * Set whether another block can chain onto the bottom of this block. * @param {boolean} newBoolean True if there can be a next statement. * @param {(string|Array.<string>|null)=} opt_check Statement type or * list of statement types. Null/undefined if any type could be connected. */ setNextStatement(newBoolean: boolean, opt_check?: string|string[]|any /*null*/): void; /** * Set whether this block returns a value. * @param {boolean} newBoolean True if there is an output. * @param {(string|Array.<string>|null)=} opt_check Returned type or list * of returned types. Null or undefined if any type could be returned * (e.g. variable get). */ setOutput(newBoolean: boolean, opt_check?: string|string[]|any /*null*/): void; /** * Set whether value inputs are arranged horizontally or vertically. * @param {boolean} newBoolean True if inputs are horizontal. */ setInputsInline(newBoolean: boolean): void; /** * Get whether value inputs are arranged horizontally or vertically. * @return {boolean} True if inputs are horizontal. */ getInputsInline(): boolean; /** * Set whether the block is disabled or not. * @param {boolean} disabled True if disabled. * @deprecated May 2019 */ setDisabled(disabled: boolean): void; /** * Get whether this block is enabled or not. * @return {boolean} True if enabled. */ isEnabled(): boolean; /** * Set whether the block is enabled or not. * @param {boolean} enabled True if enabled. */ setEnabled(enabled: boolean): void; /** * Get whether the block is disabled or not due to parents. * The block's own disabled property is not considered. * @return {boolean} True if disabled. */ getInheritedDisabled(): boolean; /** * Get whether the block is collapsed or not. * @return {boolean} True if collapsed. */ isCollapsed(): boolean; /** * Set whether the block is collapsed or not. * @param {boolean} collapsed True if collapsed. 
*/ setCollapsed(collapsed: boolean): void; /** * Create a human-readable text representation of this block and any children. * @param {number=} opt_maxLength Truncate the string to this length. * @param {string=} opt_emptyToken The placeholder string used to denote an * empty field. If not specified, '?' is used. * @return {string} Text of block. */ toString(opt_maxLength?: number, opt_emptyToken?: string): string; /** * Shortcut for appending a value input row. * @param {string} name Language-neutral identifier which may used to find this * input again. Should be unique to this block. * @return {!Blockly.Input} The input object created. */ appendValueInput(name: string): Blockly.Input; /** * Shortcut for appending a statement input row. * @param {string} name Language-neutral identifier which may used to find this * input again. Should be unique to this block. * @return {!Blockly.Input} The input object created. */ appendStatementInput(name: string): Blockly.Input; /** * Shortcut for appending a dummy input row. * @param {string=} opt_name Language-neutral identifier which may used to find * this input again. Should be unique to this block. * @return {!Blockly.Input} The input object created. */ appendDummyInput(opt_name?: string): Blockly.Input; /** * Initialize this block using a cross-platform, internationalization-friendly * JSON description. * @param {!Object} json Structured data describing the block. */ jsonInit(json: Object): void; /** * Add key/values from mixinObj to this block object. By default, this method * will check that the keys in mixinObj will not overwrite existing values in * the block, including prototype values. This provides some insurance against * mixin / extension incompatibilities with future block features. This check * can be disabled by passing true as the second argument. * @param {!Object} mixinObj The key/values pairs to add to this block object. * @param {boolean=} opt_disableCheck Option flag to disable overwrite checks. */ mixin(mixinObj: Object, opt_disableCheck?: boolean): void; /** * Add a value input, statement input or local variable to this block. * @param {number} type Either Blockly.INPUT_VALUE or Blockly.NEXT_STATEMENT or * Blockly.DUMMY_INPUT. * @param {string} name Language-neutral identifier which may used to find this * input again. Should be unique to this block. * @return {!Blockly.Input} The input object created. * @protected */ appendInput_(type: number, name: string): Blockly.Input; /** * Move a named input to a different location on this block. * @param {string} name The name of the input to move. * @param {?string} refName Name of input that should be after the moved input, * or null to be the input at the end. */ moveInputBefore(name: string, refName: string): void; /** * Move a numbered input to a different location on this block. * @param {number} inputIndex Index of the input to move. * @param {number} refIndex Index of input that should be after the moved input. */ moveNumberedInputBefore(inputIndex: number, refIndex: number): void; /** * Remove an input from this block. * @param {string} name The name of the input. * @param {boolean=} opt_quiet True to prevent error if input is not present. * @throws {Error} if the input is not present and * opt_quiet is not true. */ removeInput(name: string, opt_quiet?: boolean): void; /** * Fetches the named input object. * @param {string} name The name of the input. * @return {Blockly.Input} The input object, or null if input does not exist. 
*/ getInput(name: string): Blockly.Input; /** * Fetches the block attached to the named input. * @param {string} name The name of the input. * @return {Blockly.Block} The attached value block, or null if the input is * either disconnected or if the input does not exist. */ getInputTargetBlock(name: string): Blockly.Block; /** * Returns the comment on this block (or null if there is no comment). * @return {string} Block's comment. */ getCommentText(): string; /** * Set this block's comment text. * @param {?string} text The text, or null to delete. */ setCommentText(text: string): void; /** * Set this block's warning text. * @param {?string} _text The text, or null to delete. * @param {string=} _opt_id An optional ID for the warning text to be able to * maintain multiple warnings. */ setWarningText(_text: string, _opt_id?: string): void; /** * Give this block a mutator dialog. * @param {Blockly.Mutator} _mutator A mutator dialog instance or null to * remove. */ setMutator(_mutator: Blockly.Mutator): void; /** * Return the coordinates of the top-left corner of this block relative to the * drawing surface's origin (0,0), in workspace units. * @return {!Blockly.utils.Coordinate} Object with .x and .y properties. */ getRelativeToSurfaceXY(): Blockly.utils.Coordinate; /** * Move a block by a relative offset. * @param {number} dx Horizontal offset, in workspace units. * @param {number} dy Vertical offset, in workspace units. */ moveBy(dx: number, dy: number): void; /** * Recursively checks whether all statement and value inputs are filled with * blocks. Also checks all following statement blocks in this stack. * @param {boolean=} opt_shadowBlocksAreFilled An optional argument controlling * whether shadow blocks are counted as filled. Defaults to true. * @return {boolean} True if all inputs are filled, false otherwise. */ allInputsFilled(opt_shadowBlocksAreFilled?: boolean): boolean; /** * This method returns a string describing this Block in developer terms (type * name and ID; English only). * * Intended to on be used in console logs and errors. If you need a string that * uses the user's native language (including block text, field values, and * child blocks), use [toString()]{@link Blockly.Block#toString}. * @return {string} The description. */ toDevString(): string; } } declare module Blockly.Block { /** * @typedef {{ * text:?string, * pinned:boolean, * size:Blockly.utils.Size * }} */ interface CommentModel { text: string; pinned: boolean; size: Blockly.utils.Size } } declare module Blockly.blockAnimations { /** * Play some UI effects (sound, animation) when disposing of a block. * @param {!Blockly.BlockSvg} block The block being disposed of. * @package */ function disposeUiEffect(block: Blockly.BlockSvg): void; /** * Play some UI effects (sound, ripple) after a connection has been established. * @param {!Blockly.BlockSvg} block The block being connected. * @package */ function connectionUiEffect(block: Blockly.BlockSvg): void; /** * Play some UI effects (sound, animation) when disconnecting a block. * @param {!Blockly.BlockSvg} block The block being disconnected. * @package */ function disconnectUiEffect(block: Blockly.BlockSvg): void; /** * Stop the disconnect UI animation immediately. 
* @package */ function disconnectUiStop(): void; } declare module Blockly { class BlockDragSurfaceSvg extends BlockDragSurfaceSvg__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class BlockDragSurfaceSvg__Class { /** * Class for a drag surface for the currently dragged block. This is a separate * SVG that contains only the currently moving block, or nothing. * @param {!Element} container Containing element. * @constructor */ constructor(container: Element); /** * Create the drag surface and inject it into the container. */ createDom(): void; /** * Set the SVG blocks on the drag surface's group and show the surface. * Only one block group should be on the drag surface at a time. * @param {!SVGElement} blocks Block or group of blocks to place on the drag * surface. */ setBlocksAndShow(blocks: SVGElement): void; /** * Translate and scale the entire drag surface group to the given position, to * keep in sync with the workspace. * @param {number} x X translation in workspace coordinates. * @param {number} y Y translation in workspace coordinates. * @param {number} scale Scale of the group. */ translateAndScaleGroup(x: number, y: number, scale: number): void; /** * Translate the entire drag surface during a drag. * We translate the drag surface instead of the blocks inside the surface * so that the browser avoids repainting the SVG. * Because of this, the drag coordinates must be adjusted by scale. * @param {number} x X translation for the entire surface. * @param {number} y Y translation for the entire surface. */ translateSurface(x: number, y: number): void; /** * Reports the surface translation in scaled workspace coordinates. * Use this when finishing a drag to return blocks to the correct position. * @return {!Blockly.utils.Coordinate} Current translation of the surface. */ getSurfaceTranslation(): Blockly.utils.Coordinate; /** * Provide a reference to the drag group (primarily for * BlockSvg.getRelativeToSurfaceXY). * @return {SVGElement} Drag surface group element. */ getGroup(): SVGElement; /** * Get the current blocks on the drag surface, if any (primarily * for BlockSvg.getRelativeToSurfaceXY). * @return {!Element|undefined} Drag surface block DOM element, or undefined * if no blocks exist. */ getCurrentBlock(): Element|any /*undefined*/; /** * Clear the group and hide the surface; move the blocks off onto the provided * element. * If the block is being deleted it doesn't need to go back to the original * surface, since it would be removed immediately during dispose. * @param {Element=} opt_newSurface Surface the dragging blocks should be moved * to, or null if the blocks should be removed from this surface without * being moved to a different surface. */ clearAndHide(opt_newSurface?: Element): void; } } declare module Blockly { class BlockDragger extends BlockDragger__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class BlockDragger__Class { /** * Class for a block dragger. It moves blocks around the workspace when they * are being dragged by a mouse or touch. * @param {!Blockly.BlockSvg} block The block to drag. * @param {!Blockly.WorkspaceSvg} workspace The workspace to drag on. * @constructor */ constructor(block: Blockly.BlockSvg, workspace: Blockly.WorkspaceSvg); /** * Sever all links from this object. * @package */ dispose(): void; /** * Start dragging a block. This includes moving it to the drag surface. 
* @param {!Blockly.utils.Coordinate} currentDragDeltaXY How far the pointer has * moved from the position at mouse down, in pixel units. * @param {boolean} healStack Whether or not to heal the stack after * disconnecting. * @package */ startBlockDrag(currentDragDeltaXY: Blockly.utils.Coordinate, healStack: boolean): void; /** * Execute a step of block dragging, based on the given event. Update the * display accordingly. * @param {!Event} e The most recent move event. * @param {!Blockly.utils.Coordinate} currentDragDeltaXY How far the pointer has * moved from the position at the start of the drag, in pixel units. * @package */ dragBlock(e: Event, currentDragDeltaXY: Blockly.utils.Coordinate): void; /** * Finish a block drag and put the block back on the workspace. * @param {!Event} e The mouseup/touchend event. * @param {!Blockly.utils.Coordinate} currentDragDeltaXY How far the pointer has * moved from the position at the start of the drag, in pixel units. * @package */ endBlockDrag(e: Event, currentDragDeltaXY: Blockly.utils.Coordinate): void; /** * Get a list of the insertion markers that currently exist. Drags have 0, 1, * or 2 insertion markers. * @return {!Array.<!Blockly.BlockSvg>} A possibly empty list of insertion * marker blocks. * @package */ getInsertionMarkers(): Blockly.BlockSvg[]; } } declare module Blockly.Events { class BlockBase extends BlockBase__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class BlockBase__Class extends Blockly.Events.Abstract__Class { /** * Abstract class for a block event. * @param {Blockly.Block} block The block this event corresponds to. * @extends {Blockly.Events.Abstract} * @constructor */ constructor(block: Blockly.Block); /** * The block id for the block this event pertains to * @type {string} */ blockId: string; /** * Encode the event as JSON. * @return {!Object} JSON representation. */ toJson(): Object; /** * Decode the JSON event. * @param {!Object} json JSON representation. */ fromJson(json: Object): void; } class Change extends Change__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Change__Class extends Blockly.Events.BlockBase__Class { /** * Class for a block change event. * @param {Blockly.Block} block The changed block. Null for a blank event. * @param {string} element One of 'field', 'comment', 'disabled', etc. * @param {?string} name Name of input or field affected, or null. * @param {*} oldValue Previous value of element. * @param {*} newValue New value of element. * @extends {Blockly.Events.BlockBase} * @constructor */ constructor(block: Blockly.Block, element: string, name: string, oldValue: any, newValue: any); /** * Type of this event. * @type {string} */ type: string; /** * Encode the event as JSON. * @return {!Object} JSON representation. */ toJson(): Object; /** * Decode the JSON event. * @param {!Object} json JSON representation. */ fromJson(json: Object): void; /** * Does this event record any change of state? * @return {boolean} False if something changed. */ isNull(): boolean; /** * Run a change event. * @param {boolean} forward True if run forward, false if run backward (undo). */ run(forward: boolean): void; } class BlockChange extends BlockChange__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class BlockChange__Class extends Blockly.Events.BlockBase__Class { /** * Class for a block change event. * @param {Blockly.Block} block The changed block. Null for a blank event. 
* @param {string} element One of 'field', 'comment', 'disabled', etc. * @param {?string} name Name of input or field affected, or null. * @param {*} oldValue Previous value of element. * @param {*} newValue New value of element. * @extends {Blockly.Events.BlockBase} * @constructor */ constructor(block: Blockly.Block, element: string, name: string, oldValue: any, newValue: any); } class Create extends Create__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Create__Class extends Blockly.Events.BlockBase__Class { /** * Class for a block creation event. * @param {Blockly.Block} block The created block. Null for a blank event. * @extends {Blockly.Events.BlockBase} * @constructor */ constructor(block: Blockly.Block); /** * Type of this event. * @type {string} */ type: string; /** * Encode the event as JSON. * @return {!Object} JSON representation. */ toJson(): Object; /** * Decode the JSON event. * @param {!Object} json JSON representation. */ fromJson(json: Object): void; /** * Run a creation event. * @param {boolean} forward True if run forward, false if run backward (undo). */ run(forward: boolean): void; } class BlockCreate extends BlockCreate__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class BlockCreate__Class extends Blockly.Events.BlockBase__Class { /** * Class for a block creation event. * @param {Blockly.Block} block The created block. Null for a blank event. * @extends {Blockly.Events.BlockBase} * @constructor */ constructor(block: Blockly.Block); } class Delete extends Delete__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Delete__Class extends Blockly.Events.BlockBase__Class { /** * Class for a block deletion event. * @param {Blockly.Block} block The deleted block. Null for a blank event. * @extends {Blockly.Events.BlockBase} * @constructor */ constructor(block: Blockly.Block); /** * Type of this event. * @type {string} */ type: string; /** * Encode the event as JSON. * @return {!Object} JSON representation. */ toJson(): Object; /** * Decode the JSON event. * @param {!Object} json JSON representation. */ fromJson(json: Object): void; /** * Run a deletion event. * @param {boolean} forward True if run forward, false if run backward (undo). */ run(forward: boolean): void; } class BlockDelete extends BlockDelete__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class BlockDelete__Class extends Blockly.Events.BlockBase__Class { /** * Class for a block deletion event. * @param {Blockly.Block} block The deleted block. Null for a blank event. * @extends {Blockly.Events.BlockBase} * @constructor */ constructor(block: Blockly.Block); } class Move extends Move__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Move__Class extends Blockly.Events.BlockBase__Class { /** * Class for a block move event. Created before the move. * @param {Blockly.Block} block The moved block. Null for a blank event. * @extends {Blockly.Events.BlockBase} * @constructor */ constructor(block: Blockly.Block); /** * Type of this event. * @type {string} */ type: string; /** * Encode the event as JSON. * @return {!Object} JSON representation. */ toJson(): Object; /** * Decode the JSON event. * @param {!Object} json JSON representation. */ fromJson(json: Object): void; /** * Record the block's new location. Called after the move. 
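*
* A minimal sketch of the create-before / record-after pattern; the block
* variable and the offsets are assumptions for illustration, and events are
* assumed to be enabled.
* @example
* var moveEvent = new Blockly.Events.Move(block);
* block.moveBy(20, 10);
* moveEvent.recordNew();
* Blockly.Events.fire(moveEvent);
*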
*/ recordNew(): void; /** * Does this event record any change of state? * @return {boolean} False if something changed. */ isNull(): boolean; /** * Run a move event. * @param {boolean} forward True if run forward, false if run backward (undo). */ run(forward: boolean): void; } class BlockMove extends BlockMove__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class BlockMove__Class extends Blockly.Events.BlockBase__Class { /** * Class for a block move event. Created before the move. * @param {Blockly.Block} block The moved block. Null for a blank event. * @extends {Blockly.Events.BlockBase} * @constructor */ constructor(block: Blockly.Block); } } declare module Blockly { class BlockSvg extends BlockSvg__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class BlockSvg__Class extends Blockly.Block__Class { /** * Class for a block's SVG representation. * Not normally called directly, workspace.newBlock() is preferred. * @param {!Blockly.WorkspaceSvg} workspace The block's workspace. * @param {?string} prototypeName Name of the language object containing * type-specific functions for this block. * @param {string=} opt_id Optional ID. Use this ID if provided, otherwise * create a new ID. * @extends {Blockly.Block} * @constructor */ constructor(workspace: Blockly.WorkspaceSvg, prototypeName: string, opt_id?: string); /** * The renderer's path object. * @type {Blockly.blockRendering.IPathObject} * @package */ pathObject: Blockly.blockRendering.IPathObject; /** @type {boolean} */ rendered: boolean; /** * Height of this block, not including any statement blocks above or below. * Height is in workspace units. */ height: any /*missing*/; /** * Width of this block, including any connected value blocks. * Width is in workspace units. */ width: any /*missing*/; /** * An optional method called when a mutator dialog is first opened. * This function must create and initialize a top-level block for the mutator * dialog, and return it. This function should also populate this top-level * block with any sub-blocks which are appropriate. This method must also be * coupled with defining a `compose` method for the default mutation dialog * button and UI to appear. * @type {?function(Blockly.WorkspaceSvg):!Blockly.BlockSvg} */ decompose: { (_0: Blockly.WorkspaceSvg): Blockly.BlockSvg }; /** * An optional method called when a mutator dialog saves its content. * This function is called to modify the original block according to new * settings. This method must also be coupled with defining a `decompose` * method for the default mutation dialog button and UI to appear. * @type {?function(!Blockly.BlockSvg)} */ compose: { (_0: Blockly.BlockSvg): any /*missing*/ }; /** * Create and initialize the SVG representation of the block. * May be called more than once. */ initSvg(): void; /** * Select this block. Highlight it visually. */ select(): void; /** * Unselect this block. Remove its highlighting. */ unselect(): void; /** * Block's mutator icon (if any). * @type {Blockly.Mutator} */ mutator: Blockly.Mutator; /** * Block's comment icon (if any). * @type {Blockly.Comment} * @deprecated August 2019. Use getCommentIcon instead. */ comment: Blockly.Comment; /** * Block's warning icon (if any). * @type {Blockly.Warning} */ warning: Blockly.Warning; /** * Returns a list of mutator, comment, and warning icons. * @return {!Array} List of icons. 
*/ getIcons(): any[]; /** * Return the coordinates of the top-left corner of this block relative to the * drawing surface's origin (0,0), in workspace units. * If the block is on the workspace, (0, 0) is the origin of the workspace * coordinate system. * This does not change with workspace scale. * @return {!Blockly.utils.Coordinate} Object with .x and .y properties in * workspace coordinates. */ getRelativeToSurfaceXY(): Blockly.utils.Coordinate; /** * Move a block by a relative offset. * @param {number} dx Horizontal offset in workspace units. * @param {number} dy Vertical offset in workspace units. */ moveBy(dx: number, dy: number): void; /** * Transforms a block by setting the translation on the transform attribute * of the block's SVG. * @param {number} x The x coordinate of the translation in workspace units. * @param {number} y The y coordinate of the translation in workspace units. */ translate(x: number, y: number): void; /** * Move a block to a position. * @param {Blockly.utils.Coordinate} xy The position to move to in workspace units. */ moveTo(xy: Blockly.utils.Coordinate): void; /** * Move this block during a drag, taking into account whether we are using a * drag surface to translate blocks. * This block must be a top-level block. * @param {!Blockly.utils.Coordinate} newLoc The location to translate to, in * workspace coordinates. * @package */ moveDuringDrag(newLoc: Blockly.utils.Coordinate): void; /** * Snap this block to the nearest grid point. */ snapToGrid(): void; /** * Returns the coordinates of a bounding box describing the dimensions of this * block and any blocks stacked below it. * Coordinate system: workspace coordinates. * @return {!Blockly.utils.Rect} Object with coordinates of the bounding box. */ getBoundingRectangle(): Blockly.utils.Rect; /** * Notify every input on this block to mark its fields as dirty. * A dirty field is a field that needs to be re-rendered. */ markDirty(): void; /** * Set whether the block is collapsed or not. * @param {boolean} collapsed True if collapsed. */ setCollapsed(collapsed: boolean): void; /** * Open the next (or previous) FieldTextInput. * @param {Blockly.Field|Blockly.Block} start Current location. * @param {boolean} forward If true go forward, otherwise backward. */ tab(start: Blockly.Field|Blockly.Block, forward: boolean): void; /** * Generate the context menu for this block. * @protected * @return {Array.<!Object>} Context menu options */ generateContextMenu(): Object[]; /** * Recursively adds or removes the dragging class to this node and its children. * @param {boolean} adding True if adding, false if removing. * @package */ setDragging(adding: boolean): void; /** * Add or remove the UI indicating if this block is movable or not. */ updateMovable(): void; /** * Set whether this block is movable or not. * @param {boolean} movable True if movable. */ setMovable(movable: boolean): void; /** * Set whether this block is editable or not. * @param {boolean} editable True if editable. */ setEditable(editable: boolean): void; /** * Set whether this block is a shadow block or not. * @param {boolean} shadow True if a shadow. */ setShadow(shadow: boolean): void; /** * Set whether this block is an insertion marker block or not. * Once set this cannot be unset. * @param {boolean} insertionMarker True if an insertion marker. * @package */ setInsertionMarker(insertionMarker: boolean): void; /** * Return the root node of the SVG or null if none exists. * @return {SVGElement} The root SVG node (probably a group).
*/ getSvgRoot(): SVGElement; /** * Dispose of this block. * @param {boolean=} healStack If true, then try to heal any gap by connecting * the next statement with the previous statement. Otherwise, dispose of * all children of this block. * @param {boolean=} animate If true, show a disposal animation and sound. */ dispose(healStack?: boolean, animate?: boolean): void; /** * Change the colour of a block. */ updateColour(): void; /** * Sets the colour of the border. * Removes the light and dark paths if a border colour is defined. */ setBorderColour_(): void; /** * Sets the colour of shadow blocks. * @return {?string} The background colour of the block. */ setShadowColour_(): string; /** * Enable or disable a block. */ updateDisabled(): void; /** * Get the comment icon attached to this block, or null if the block has no * comment. * @return {Blockly.Comment} The comment icon attached to this block, or null. */ getCommentIcon(): Blockly.Comment; /** * Set this block's comment text. * @param {?string} text The text, or null to delete. */ setCommentText(text: string): void; /** * Set this block's warning text. * @param {?string} text The text, or null to delete. * @param {string=} opt_id An optional ID for the warning text to be able to * maintain multiple warnings. */ setWarningText(text: string, opt_id?: string): void; /** * Give this block a mutator dialog. * @param {Blockly.Mutator} mutator A mutator dialog instance or null to remove. */ setMutator(mutator: Blockly.Mutator): void; /** * Set whether the block is disabled or not. * @param {boolean} disabled True if disabled. * @deprecated May 2019 */ setDisabled(disabled: boolean): void; /** * Set whether the block is enabled or not. * @param {boolean} enabled True if enabled. */ setEnabled(enabled: boolean): void; /** * Set whether the block is highlighted or not. Block highlighting is * often used to visually mark blocks currently being executed. * @param {boolean} highlighted True if highlighted. */ setHighlighted(highlighted: boolean): void; /** * Select this block. Highlight it visually. */ addSelect(): void; /** * Unselect this block. Remove its highlighting. */ removeSelect(): void; /** * Update the cursor over this block by adding or removing a class. * @param {boolean} enable True if the delete cursor should be shown, false * otherwise. * @package */ setDeleteStyle(enable: boolean): void; /** * Change the colour of a block. * @param {number|string} colour HSV hue value, or #RRGGBB string. */ setColour(colour: number|string): void; /** * Move this block to the front of the visible workspace. * <g> tags do not respect z-index so SVG renders them in the * order that they are in the DOM. By placing this block first within the * block group's <g>, it will render on top of any other blocks. * @package */ bringToFront(): void; /** * Set whether this block can chain onto the bottom of another block. * @param {boolean} newBoolean True if there can be a previous statement. * @param {(string|Array.<string>|null)=} opt_check Statement type or * list of statement types. Null/undefined if any type could be connected. */ setPreviousStatement(newBoolean: boolean, opt_check?: string|string[]|any /*null*/): void; /** * Set whether another block can chain onto the bottom of this block. * @param {boolean} newBoolean True if there can be a next statement. * @param {(string|Array.<string>|null)=} opt_check Statement type or * list of statement types. Null/undefined if any type could be connected. 
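*
* A small sketch of how statement connections are typically declared in a
* block definition; the block type name, label text and colour are made up
* for illustration.
* @example
* Blockly.Blocks['example_statement'] = {
*   init: function() {
*     this.appendDummyInput().appendField('do something');
*     this.setPreviousStatement(true);  // may attach below another block
*     this.setNextStatement(true);      // another block may attach below it
*     this.setColour(160);
*   }
* };
*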
*/ setNextStatement(newBoolean: boolean, opt_check?: string|string[]|any /*null*/): void; /** * Set whether this block returns a value. * @param {boolean} newBoolean True if there is an output. * @param {(string|Array.<string>|null)=} opt_check Returned type or list * of returned types. Null or undefined if any type could be returned * (e.g. variable get). */ setOutput(newBoolean: boolean, opt_check?: string|string[]|any /*null*/): void; /** * Set whether value inputs are arranged horizontally or vertically. * @param {boolean} newBoolean True if inputs are horizontal. */ setInputsInline(newBoolean: boolean): void; /** * Remove an input from this block. * @param {string} name The name of the input. * @param {boolean=} opt_quiet True to prevent error if input is not present. * @throws {Error} if the input is not present and * opt_quiet is not true. */ removeInput(name: string, opt_quiet?: boolean): void; /** * Move a numbered input to a different location on this block. * @param {number} inputIndex Index of the input to move. * @param {number} refIndex Index of input that should be after the moved input. */ moveNumberedInputBefore(inputIndex: number, refIndex: number): void; /** * Set whether the connections are hidden (not tracked in a database) or not. * Recursively walk down all child blocks (except collapsed blocks). * @param {boolean} hidden True if connections are hidden. * @package */ setConnectionsHidden(hidden: boolean): void; /** * Returns connections originating from this block. * @param {boolean} all If true, return all connections even hidden ones. * Otherwise, for a non-rendered block return an empty list, and for a * collapsed block don't return inputs connections. * @return {!Array.<!Blockly.Connection>} Array of connections. * @package */ getConnections_(all: boolean): Blockly.Connection[]; /** * Bump unconnected blocks out of alignment. Two blocks which aren't actually * connected should not coincidentally line up on screen. */ bumpNeighbours(): void; /** * Schedule snapping to grid and bumping neighbours to occur after a brief * delay. * @package */ scheduleSnapAndBump(): void; /** * Position a block so that it doesn't move the target block when connected. * The block to position is usually either the first block in a dragged stack or * an insertion marker. * @param {!Blockly.Connection} sourceConnection The connection on the moving * block's stack. * @param {!Blockly.Connection} targetConnection The connection that should stay * stationary as this block is positioned. */ positionNearConnection(sourceConnection: Blockly.Connection, targetConnection: Blockly.Connection): void; /** * Render the block. * Lays out and reflows a block based on its contents and settings. * @param {boolean=} opt_bubble If false, just render this block. * If true, also render block's parent, grandparent, etc. Defaults to true. */ render(opt_bubble?: boolean): void; /** * Add the cursor svg to this block's svg group. * @param {SVGElement} cursorSvg The svg root of the cursor to be added to the * block svg group. * @package */ setCursorSvg(cursorSvg: SVGElement): void; /** * Add the marker svg to this block's svg group. * @param {SVGElement} markerSvg The svg root of the marker to be added to the * block svg group. * @package */ setMarkerSvg(markerSvg: SVGElement): void; /** * Returns a bounding box describing the dimensions of this block * and any blocks stacked below it. * @return {!{height: number, width: number}} Object with height and width * properties in workspace units. 
* @package */ getHeightWidth(): { height: number; width: number }; /** * Position a new block correctly, so that it doesn't move the existing block * when connected to it. * @param {!Blockly.Block} newBlock The block to position - either the first * block in a dragged stack or an insertion marker. * @param {!Blockly.Connection} newConnection The connection on the new block's * stack - either a connection on newBlock, or the last NEXT_STATEMENT * connection on the stack if the stack's being dropped before another * block. * @param {!Blockly.Connection} existingConnection The connection on the * existing block, which newBlock should line up with. * @package */ positionNewBlock(newBlock: Blockly.Block, newConnection: Blockly.Connection, existingConnection: Blockly.Connection): void; /** * Visual effect to show that if the dragging block is dropped, this block will * be replaced. If a shadow block, it will disappear. Otherwise it will bump. * @param {boolean} add True if highlighting should be added. * @package */ highlightForReplacement(add: boolean): void; } } declare module Blockly.BlockSvg { /** * Constant for identifying rows that are to be rendered inline. * Don't collide with Blockly.INPUT_VALUE and friends. * @const */ var INLINE: any /*missing*/; /** * ID to give the "collapsed warnings" warning. Allows us to remove the * "collapsed warnings" warning without removing any warnings that belong to * the block. * @type {string} * @const */ var COLLAPSED_WARNING_ID: string; /** * Vertical space between elements. * @const * @package */ var SEP_SPACE_Y: any /*missing*/; /** * Minimum height of a block. * @const * @package */ var MIN_BLOCK_Y: any /*missing*/; /** * Width of horizontal puzzle tab. * @const * @package */ var TAB_WIDTH: any /*missing*/; /** * Do blocks with no previous or output connections have a 'hat' on top? * @const * @package */ var START_HAT: any /*missing*/; } declare module Blockly { /** * Blockly core version. * This constant is overridden by the build script (build.py) to the value of the version * in package.json. This is done during the gen_core build step. * For local builds, you can pass --define='Blockly.VERSION=X.Y.Z' to the compiler * to override this constant. * @define {string} */ var VERSION: any /*missing*/; /** * The main workspace most recently used. * Set by Blockly.WorkspaceSvg.prototype.markFocused * @type {Blockly.Workspace} */ var mainWorkspace: Blockly.Workspace; /** * Currently selected block. * @type {Blockly.Block} */ var selected: Blockly.Block; /** * Current cursor. * @type {Blockly.Cursor} */ var cursor: Blockly.Cursor; /** * Whether or not we're currently in keyboard accessibility mode. * @type {boolean} */ var keyboardAccessibilityMode: boolean; /** * Returns the dimensions of the specified SVG image. * @param {!Element} svg SVG image. * @return {!Object} Contains width and height properties. */ function svgSize(svg: Element): Object; /** * Size the workspace when the contents change. This also updates * scrollbars accordingly. * @param {!Blockly.WorkspaceSvg} workspace The workspace to resize. */ function resizeSvgContents(workspace: Blockly.WorkspaceSvg): void; /** * Size the SVG image to completely fill its container. Call this when the view * actually changes sizes (e.g. on a window resize/device orientation change). * See Blockly.resizeSvgContents to resize the workspace when the contents * change (e.g. when a block is added or removed). * Record the height/width of the SVG image. 
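*
* A typical wiring, sketched as an assumption about the embedding page: the
* workspace variable is whatever an earlier Blockly.inject call returned.
* @example
* window.addEventListener('resize', function() {
*   Blockly.svgResize(workspace);
* }, false);
*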
* @param {!Blockly.WorkspaceSvg} workspace Any workspace in the SVG. */ function svgResize(workspace: Blockly.WorkspaceSvg): void; /** * Close tooltips, context menus, dropdown selections, etc. * @param {boolean=} opt_allowToolbox If true, don't close the toolbox. */ function hideChaff(opt_allowToolbox?: boolean): void; /** * Returns the main workspace. Returns the last used main workspace (based on * focus). Try not to use this function, particularly if there are multiple * Blockly instances on a page. * @return {!Blockly.Workspace} The main workspace. */ function getMainWorkspace(): Blockly.Workspace; /** * Wrapper to window.alert() that app developers may override to * provide alternatives to the modal browser window. * @param {string} message The message to display to the user. * @param {function()=} opt_callback The callback when the alert is dismissed. */ function alert(message: string, opt_callback?: { (): any /*missing*/ }): void; /** * Wrapper to window.confirm() that app developers may override to * provide alternatives to the modal browser window. * @param {string} message The message to display to the user. * @param {!function(boolean)} callback The callback for handling user response. */ function confirm(message: string, callback: { (_0: boolean): any /*missing*/ }): void; /** * Wrapper to window.prompt() that app developers may override to provide * alternatives to the modal browser window. Built-in browser prompts are * often used for better text input experience on mobile device. We strongly * recommend testing mobile when overriding this. * @param {string} message The message to display to the user. * @param {string} defaultValue The value to initialize the prompt with. * @param {!function(string)} callback The callback for handling user response. */ function prompt(message: string, defaultValue: string, callback: { (_0: string): any /*missing*/ }): void; /** * Define blocks from an array of JSON block definitions, as might be generated * by the Blockly Developer Tools. * @param {!Array.<!Object>} jsonArray An array of JSON block definitions. */ function defineBlocksWithJsonArray(jsonArray: Object[]): void; /** * Bind an event to a function call. When calling the function, verifies that * it belongs to the touch stream that is currently being processed, and splits * multitouch events into multiple events as needed. * @param {!EventTarget} node Node upon which to listen. * @param {string} name Event name to listen to (e.g. 'mousedown'). * @param {Object} thisObject The value of 'this' in the function. * @param {!Function} func Function to call when event is triggered. * @param {boolean=} opt_noCaptureIdentifier True if triggering on this event * should not block execution of other event handlers on this touch or * other simultaneous touches. False by default. * @param {boolean=} opt_noPreventDefault True if triggering on this event * should prevent the default handler. False by default. If * opt_noPreventDefault is provided, opt_noCaptureIdentifier must also be * provided. * @return {!Array.<!Array>} Opaque data that can be passed to unbindEvent_. */ function bindEventWithChecks_(node: EventTarget, name: string, thisObject: Object, func: Function, opt_noCaptureIdentifier?: boolean, opt_noPreventDefault?: boolean): any[][]; /** * Bind an event to a function call. Handles multitouch events by using the * coordinates of the first changed touch, and doesn't do any safety checks for * simultaneous event processing. 
* @deprecated in favor of bindEventWithChecks_, but preserved for external * users. * @param {!EventTarget} node Node upon which to listen. * @param {string} name Event name to listen to (e.g. 'mousedown'). * @param {Object} thisObject The value of 'this' in the function. * @param {!Function} func Function to call when event is triggered. * @return {!Array.<!Array>} Opaque data that can be passed to unbindEvent_. */ function bindEvent_(node: EventTarget, name: string, thisObject: Object, func: Function): any[][]; /** * Unbind one or more events from a function call. * @param {!Array.<!Array>} bindData Opaque data from bindEvent_. * This list is emptied during the course of calling this function. * @return {!Function} The function call. */ function unbindEvent_(bindData: any[][]): Function; /** * Is the given string a number (includes negative and decimals). * @param {string} str Input string. * @return {boolean} True if number, false otherwise. */ function isNumber(str: string): boolean; /** * Convert a hue (HSV model) into an RGB hex triplet. * @param {number} hue Hue on a colour wheel (0-360). * @return {string} RGB code, e.g. '#5ba65b'. */ function hueToHex(hue: number): string; /** * Checks old colour constants are not overwritten by the host application. * If a constant is overwritten, it prints a console warning directing the * developer to use the equivalent Msg constant. * @package */ function checkBlockColourConstants(): void; } declare module Blockly { class Bubble extends Bubble__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Bubble__Class { /** * Class for UI bubble. * @param {!Blockly.WorkspaceSvg} workspace The workspace on which to draw the * bubble. * @param {!Element} content SVG content for the bubble. * @param {Element} shape SVG element to avoid eclipsing. * @param {!Blockly.utils.Coordinate} anchorXY Absolute position of bubble's * anchor point. * @param {?number} bubbleWidth Width of bubble, or null if not resizable. * @param {?number} bubbleHeight Height of bubble, or null if not resizable. * @constructor */ constructor(workspace: Blockly.WorkspaceSvg, content: Element, shape: Element, anchorXY: Blockly.utils.Coordinate, bubbleWidth: number, bubbleHeight: number); /** * Function to call on resize of bubble. * @type {Function} */ resizeCallback_: Function; /** * Return the root node of the bubble's SVG group. * @return {SVGElement} The root SVG node of the bubble's group. */ getSvgRoot(): SVGElement; /** * Expose the block's ID on the bubble's top-level SVG group. * @param {string} id ID of block. */ setSvgId(id: string): void; /** * Get whether this bubble is deletable or not. * @return {boolean} True if deletable. * @package */ isDeletable(): boolean; /** * Register a function as a callback event for when the bubble is resized. * @param {!Function} callback The function to call on resize. */ registerResizeEvent(callback: Function): void; /** * Notification that the anchor has moved. * Update the arrow and bubble accordingly. * @param {!Blockly.utils.Coordinate} xy Absolute location. */ setAnchorLocation(xy: Blockly.utils.Coordinate): void; /** * Move the bubble group to the specified location in workspace coordinates. * @param {number} x The x position to move to. * @param {number} y The y position to move to. * @package */ moveTo(x: number, y: number): void; /** * Get the dimensions of this bubble. * @return {!Blockly.utils.Size} The height and width of the bubble.
*/ getBubbleSize(): Blockly.utils.Size; /** * Size this bubble. * @param {number} width Width of the bubble. * @param {number} height Height of the bubble. */ setBubbleSize(width: number, height: number): void; /** * Change the colour of a bubble. * @param {string} hexColour Hex code of colour. */ setColour(hexColour: string): void; /** * Dispose of this bubble. */ dispose(): void; /** * Move this bubble during a drag, taking into account whether or not there is * a drag surface. * @param {Blockly.BlockDragSurfaceSvg} dragSurface The surface that carries * rendered items during a drag, or null if no drag surface is in use. * @param {!Blockly.utils.Coordinate} newLoc The location to translate to, in * workspace coordinates. * @package */ moveDuringDrag(dragSurface: Blockly.BlockDragSurfaceSvg, newLoc: Blockly.utils.Coordinate): void; /** * Return the coordinates of the top-left corner of this bubble's body relative * to the drawing surface's origin (0,0), in workspace units. * @return {!Blockly.utils.Coordinate} Object with .x and .y properties. */ getRelativeToSurfaceXY(): Blockly.utils.Coordinate; /** * Set whether auto-layout of this bubble is enabled. The first time a bubble * is shown it positions itself to not cover any blocks. Once a user has * dragged it to reposition, it renders where the user put it. * @param {boolean} enable True if auto-layout should be enabled, false * otherwise. * @package */ setAutoLayout(enable: boolean): void; } } declare module Blockly.Bubble { /** * Width of the border around the bubble. */ var BORDER_WIDTH: any /*missing*/; /** * Determines the thickness of the base of the arrow in relation to the size * of the bubble. Higher numbers result in thinner arrows. */ var ARROW_THICKNESS: any /*missing*/; /** * The number of degrees that the arrow bends counter-clockwise. */ var ARROW_ANGLE: any /*missing*/; /** * The sharpness of the arrow's bend. Higher numbers result in smoother arrows. */ var ARROW_BEND: any /*missing*/; /** * Distance between arrow point and anchor point. */ var ANCHOR_RADIUS: any /*missing*/; } declare module Blockly { class BubbleDragger extends BubbleDragger__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class BubbleDragger__Class { /** * Class for a bubble dragger. It moves things on the bubble canvas around the * workspace when they are being dragged by a mouse or touch. These can be * block comments, mutators, warnings, or workspace comments. * @param {!Blockly.Bubble|!Blockly.WorkspaceCommentSvg} bubble The item on the * bubble canvas to drag. * @param {!Blockly.WorkspaceSvg} workspace The workspace to drag on. * @constructor */ constructor(bubble: Blockly.Bubble|Blockly.WorkspaceCommentSvg, workspace: Blockly.WorkspaceSvg); /** * Sever all links from this object. * @package */ dispose(): void; /** * Start dragging a bubble. This includes moving it to the drag surface. * @package */ startBubbleDrag(): void; /** * Execute a step of bubble dragging, based on the given event. Update the * display accordingly. * @param {!Event} e The most recent move event. * @param {!Blockly.utils.Coordinate} currentDragDeltaXY How far the pointer has * moved from the position at the start of the drag, in pixel units. * @package */ dragBubble(e: Event, currentDragDeltaXY: Blockly.utils.Coordinate): void; /** * Finish a bubble drag and put the bubble back on the workspace. * @param {!Event} e The mouseup/touchend event. 
* @param {!Blockly.utils.Coordinate} currentDragDeltaXY How far the pointer has * moved from the position at the start of the drag, in pixel units. * @package */ endBubbleDrag(e: Event, currentDragDeltaXY: Blockly.utils.Coordinate): void; } } declare module Blockly { class Comment extends Comment__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Comment__Class extends Blockly.Icon__Class { /** * Class for a comment. * @param {!Blockly.Block} block The block associated with this comment. * @extends {Blockly.Icon} * @constructor */ constructor(block: Blockly.Block); /** * Show or hide the comment bubble. * @param {boolean} visible True if the bubble should be visible. */ setVisible(visible: boolean): void; /** * Get the dimensions of this comment's bubble. * @return {Blockly.utils.Size} Object with width and height properties. */ getBubbleSize(): Blockly.utils.Size; /** * Size this comment's bubble. * @param {number} width Width of the bubble. * @param {number} height Height of the bubble. */ setBubbleSize(width: number, height: number): void; /** * Returns this comment's text. * @return {string} Comment text. * @deprecated August 2019 Use block.getCommentText() instead. */ getText(): string; /** * Set this comment's text. * * If you want to receive a comment change event, then this should not be called * directly. Instead call block.setCommentText(); * @param {string} text Comment text. * @deprecated August 2019 Use block.setCommentText() instead. */ setText(text: string): void; /** * Update the comment's view to match the model. * @package */ updateText(): void; /** * Dispose of this comment. * * If you want to receive a comment "delete" event (newValue: null), then this * should not be called directly. Instead call block.setCommentText(null); */ dispose(): void; } } declare module Blockly { class Connection extends Connection__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Connection__Class { /** * Class for a connection between blocks. * @param {!Blockly.Block} source The block establishing this connection. * @param {number} type The type of the connection. * @constructor */ constructor(source: Blockly.Block, type: number); /** * @type {!Blockly.Block} * @protected */ sourceBlock_: Blockly.Block; /** @type {number} */ type: number; /** * Connection this connection connects to. Null if not connected. * @type {Blockly.Connection} */ targetConnection: Blockly.Connection; /** * Has this connection been disposed of? * @type {boolean} * @package */ disposed: boolean; /** * Horizontal location of this connection. * @type {number} * @protected */ x_: number; /** * Vertical location of this connection. * @type {number} * @protected */ y_: number; /** * Connect two connections together. This is the connection on the superior * block. * @param {!Blockly.Connection} childConnection Connection on inferior block. * @protected */ connect_(childConnection: Blockly.Connection): void; /** * Dispose of this connection. Deal with connected blocks and remove this * connection from the database. * @package */ dispose(): void; /** * Get the source block for this connection. * @return {Blockly.Block} The source block, or null if there is none. */ getSourceBlock(): Blockly.Block; /** * Does the connection belong to a superior block (higher in the source stack)? * @return {boolean} True if connection faces down or right. */ isSuperior(): boolean; /** * Is the connection connected? 
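*
* Sketch of a common guard before connecting two blocks; the input name
* 'VALUE' and the parentBlock/childBlock variables are assumptions for
* illustration.
* @example
* var connection = parentBlock.getInput('VALUE').connection;
* if (!connection.isConnected()) {
*   connection.connect(childBlock.outputConnection);
* }
*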
* @return {boolean} True if connection is connected to another connection. */ isConnected(): boolean; /** * Check if the two connections can be dragged to connect to each other. * @param {!Blockly.Connection} candidate A nearby connection to check. * @return {boolean} True if the connection is allowed, false otherwise. */ isConnectionAllowed(candidate: Blockly.Connection): boolean; /** * Behavior after a connection attempt fails. * @param {Blockly.Connection} _otherConnection Connection that this connection * failed to connect to. * @package */ onFailedConnect(_otherConnection: Blockly.Connection): void; /** * Connect this connection to another connection. * @param {!Blockly.Connection} otherConnection Connection to connect to. */ connect(otherConnection: Blockly.Connection): void; /** * Disconnect this connection. */ disconnect(): void; /** * Disconnect two blocks that are connected by this connection. * @param {!Blockly.Block} parentBlock The superior block. * @param {!Blockly.Block} childBlock The inferior block. * @protected */ disconnectInternal_(parentBlock: Blockly.Block, childBlock: Blockly.Block): void; /** * Respawn the shadow block if there was one connected to this connection. * @protected */ respawnShadow_(): void; /** * Returns the block that this connection connects to. * @return {Blockly.Block} The connected block or null if none is connected. */ targetBlock(): Blockly.Block; /** * Is this connection compatible with another connection with respect to the * value type system. E.g. square_root("Hello") is not compatible. * @param {!Blockly.Connection} otherConnection Connection to compare against. * @return {boolean} True if the connections share a type. * @protected */ checkType_(otherConnection: Blockly.Connection): boolean; /** * Change a connection's compatibility. * @param {string|!Array<string>} check Compatible value type or list of value * types. Null if all types are compatible. * @return {!Blockly.Connection} The connection being modified * (to allow chaining). */ setCheck(check: string|string[]): Blockly.Connection; /** * Get a connection's compatibility. * @return {Array} List of compatible value types. * Null if all types are compatible. * @public */ getCheck(): any[]; /** * Change a connection's shadow block. * @param {Element} shadow DOM representation of a block or null. */ setShadowDom(shadow: Element): void; /** * Return a connection's shadow block. * @return {Element} Shadow DOM representation of a block or null. */ getShadowDom(): Element; /** * Get the parent input of a connection. * @return {Blockly.Input} The input that the connection belongs to or null if * no parent exists. * @package */ getParentInput(): Blockly.Input; /** * This method returns a string describing this Connection in developer terms * (English only). Intended to be used in console logs and errors. * @return {string} The description. */ toString(): string; } } declare module Blockly.Connection { /** * Constants for checking whether two connections are compatible. */ var CAN_CONNECT: any /*missing*/; } declare module Blockly { class ConnectionDB extends ConnectionDB__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class ConnectionDB__Class { /** * Database of connections. * Connections are stored in order of their vertical component. This way * connections in an area may be looked up quickly using a binary search. * @constructor */ constructor(); /** * Add a connection to the database. Must not already exist in DB.
* @param {!Blockly.Connection} connection The connection to be added. */ addConnection(connection: Blockly.Connection): void; /** * Find the given connection. * Starts by doing a binary search to find the approximate location, then * linearly searches nearby for the exact connection. * @param {!Blockly.Connection} conn The connection to find. * @return {number} The index of the connection, or -1 if the connection was * not found. */ findConnection(conn: Blockly.Connection): number; /** * Find all nearby connections to the given connection. * Type checking does not apply, since this function is used for bumping. * @param {!Blockly.Connection} connection The connection whose neighbours * should be returned. * @param {number} maxRadius The maximum radius to another connection. * @return {!Array.<!Blockly.Connection>} List of connections. */ getNeighbours(connection: Blockly.Connection, maxRadius: number): Blockly.Connection[]; /** * Find the closest compatible connection to this connection. * @param {!Blockly.Connection} conn The connection searching for a compatible * mate. * @param {number} maxRadius The maximum radius to another connection. * @param {!Blockly.utils.Coordinate} dxy Offset between this connection's * location in the database and the current location (as a result of * dragging). * @return {!{connection: ?Blockly.Connection, radius: number}} Contains two * properties:' connection' which is either another connection or null, * and 'radius' which is the distance. */ searchForClosest(conn: Blockly.Connection, maxRadius: number, dxy: Blockly.utils.Coordinate): { connection: Blockly.Connection; radius: number }; } } declare module Blockly.ConnectionDB { /** * Initialize a set of connection DBs for a workspace. * @return {!Array.<!Blockly.ConnectionDB>} Array of databases. */ function init(): Blockly.ConnectionDB[]; } declare module Blockly { /** * The multiplier for scroll wheel deltas using the line delta mode. * @type {number} */ var LINE_MODE_MULTIPLIER: number; /** * The multiplier for scroll wheel deltas using the page delta mode. * @type {number} */ var PAGE_MODE_MULTIPLIER: number; /** * Number of pixels the mouse must move before a drag starts. */ var DRAG_RADIUS: any /*missing*/; /** * Number of pixels the mouse must move before a drag/scroll starts from the * flyout. Because the drag-intention is determined when this is reached, it is * larger than Blockly.DRAG_RADIUS so that the drag-direction is clearer. */ var FLYOUT_DRAG_RADIUS: any /*missing*/; /** * Maximum misalignment between connections for them to snap together. */ var SNAP_RADIUS: any /*missing*/; /** * Maximum misalignment between connections for them to snap together, * when a connection is already highlighted. */ var CONNECTING_SNAP_RADIUS: any /*missing*/; /** * How much to prefer staying connected to the current connection over moving to * a new connection. The current previewed connection is considered to be this * much closer to the matching connection on the block than it actually is. */ var CURRENT_CONNECTION_PREFERENCE: any /*missing*/; /** * The main colour of insertion markers, in hex. The block is rendered a * transparent grey by changing the fill opacity in CSS. */ var INSERTION_MARKER_COLOUR: any /*missing*/; /** * Delay in ms between trigger and bumping unconnected block out of alignment. */ var BUMP_DELAY: any /*missing*/; /** * Maximum randomness in workspace units for bumping a block. */ var BUMP_RANDOMNESS: any /*missing*/; /** * Number of characters to truncate a collapsed block to. 
*/ var COLLAPSE_CHARS: any /*missing*/; /** * Length in ms for a touch to become a long press. */ var LONGPRESS: any /*missing*/; /** * Prevent a sound from playing if another sound preceded it within this many * milliseconds. */ var SOUND_LIMIT: any /*missing*/; /** * When dragging a block out of a stack, split the stack in two (true), or drag * out the block healing the stack (false). */ var DRAG_STACK: any /*missing*/; /** * The richness of block colours, regardless of the hue. * Must be in the range of 0 (inclusive) to 1 (exclusive). */ var HSV_SATURATION: any /*missing*/; /** * The intensity of block colours, regardless of the hue. * Must be in the range of 0 (inclusive) to 1 (exclusive). */ var HSV_VALUE: any /*missing*/; /** * Sprited icons and images. */ var SPRITE: any /*missing*/; /** * ENUM for a right-facing value input. E.g. 'set item to' or 'return'. * @const */ var INPUT_VALUE: any /*missing*/; /** * ENUM for a left-facing value output. E.g. 'random fraction'. * @const */ var OUTPUT_VALUE: any /*missing*/; /** * ENUM for a down-facing block stack. E.g. 'if-do' or 'else'. * @const */ var NEXT_STATEMENT: any /*missing*/; /** * ENUM for an up-facing block stack. E.g. 'break out of loop'. * @const */ var PREVIOUS_STATEMENT: any /*missing*/; /** * ENUM for a dummy input. Used to add field(s) with no input. * @const */ var DUMMY_INPUT: any /*missing*/; /** * ENUM for left alignment. * @const */ var ALIGN_LEFT: any /*missing*/; /** * ENUM for centre alignment. * @const */ var ALIGN_CENTRE: any /*missing*/; /** * ENUM for right alignment. * @const */ var ALIGN_RIGHT: any /*missing*/; /** * ENUM for no drag operation. * @const */ var DRAG_NONE: any /*missing*/; /** * ENUM for inside the sticky DRAG_RADIUS. * @const */ var DRAG_STICKY: any /*missing*/; /** * ENUM for inside the non-sticky DRAG_RADIUS, for differentiating between * clicks and drags. * @const */ var DRAG_BEGIN: any /*missing*/; /** * ENUM for freely draggable (outside the DRAG_RADIUS, if one applies). * @const */ var DRAG_FREE: any /*missing*/; /** * Lookup table for determining the opposite type of a connection. * @const */ var OPPOSITE_TYPE: any /*missing*/; /** * ENUM for toolbox and flyout at top of screen. * @const */ var TOOLBOX_AT_TOP: any /*missing*/; /** * ENUM for toolbox and flyout at bottom of screen. * @const */ var TOOLBOX_AT_BOTTOM: any /*missing*/; /** * ENUM for toolbox and flyout at left of screen. * @const */ var TOOLBOX_AT_LEFT: any /*missing*/; /** * ENUM for toolbox and flyout at right of screen. * @const */ var TOOLBOX_AT_RIGHT: any /*missing*/; /** * ENUM representing that an event is not in any delete areas. * Null for backwards compatibility reasons. * @const */ var DELETE_AREA_NONE: any /*missing*/; /** * ENUM representing that an event is in the delete area of the trash can. * @const */ var DELETE_AREA_TRASH: any /*missing*/; /** * ENUM representing that an event is in the delete area of the toolbox or * flyout. * @const */ var DELETE_AREA_TOOLBOX: any /*missing*/; /** * String for use in the "custom" attribute of a category in toolbox XML. * This string indicates that the category should be dynamically populated with * variable blocks. * @const {string} */ var VARIABLE_CATEGORY_NAME: any /*missing*/; /** * String for use in the "custom" attribute of a category in toolbox XML. * This string indicates that the category should be dynamically populated with * variable blocks.
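*
* Sketch of how such a category is typically declared in toolbox XML; the
* toolbox string and the injection call are assumptions about the host page.
* @example
* var toolbox = '<xml>' +
*     '<category name="Variables" custom="VARIABLE_DYNAMIC"></category>' +
*     '</xml>';
* var workspace = Blockly.inject('blocklyDiv', {toolbox: toolbox});
*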
* @const {string} */ var VARIABLE_DYNAMIC_CATEGORY_NAME: any /*missing*/; /** * String for use in the "custom" attribute of a category in toolbox XML. * This string indicates that the category should be dynamically populated with * procedure blocks. * @const {string} */ var PROCEDURE_CATEGORY_NAME: any /*missing*/; /** * String for use in the dropdown created in field_variable. * This string indicates that this option in the dropdown is 'Rename * variable...' and if selected, should trigger the prompt to rename a variable. * @const {string} */ var RENAME_VARIABLE_ID: any /*missing*/; /** * String for use in the dropdown created in field_variable. * This string indicates that this option in the dropdown is 'Delete the "%1" * variable' and if selected, should trigger the prompt to delete a variable. * @const {string} */ var DELETE_VARIABLE_ID: any /*missing*/; } declare module Blockly.ContextMenu { /** * Which block is the context menu attached to? * @type {Blockly.Block} */ var currentBlock: Blockly.Block; /** * Construct the menu based on the list of options and show the menu. * @param {!Event} e Mouse event. * @param {!Array.<!Object>} options Array of menu options. * @param {boolean} rtl True if RTL, false if LTR. */ function show(e: Event, options: Object[], rtl: boolean): void; /** * Hide the context menu. */ function hide(): void; /** * Create a callback function that creates and configures a block, * then places the new block next to the original. * @param {!Blockly.Block} block Original block. * @param {!Element} xml XML representation of new block. * @return {!Function} Function that creates a block. */ function callbackFactory(block: Blockly.Block, xml: Element): Function; /** * Make a context menu option for deleting the current block. * @param {!Blockly.BlockSvg} block The block where the right-click originated. * @return {!Object} A menu option, containing text, enabled, and a callback. * @package */ function blockDeleteOption(block: Blockly.BlockSvg): Object; /** * Make a context menu option for showing help for the current block. * @param {!Blockly.BlockSvg} block The block where the right-click originated. * @return {!Object} A menu option, containing text, enabled, and a callback. * @package */ function blockHelpOption(block: Blockly.BlockSvg): Object; /** * Make a context menu option for duplicating the current block. * @param {!Blockly.BlockSvg} block The block where the right-click originated. * @return {!Object} A menu option, containing text, enabled, and a callback. * @package */ function blockDuplicateOption(block: Blockly.BlockSvg): Object; /** * Make a context menu option for adding or removing comments on the current * block. * @param {!Blockly.BlockSvg} block The block where the right-click originated. * @return {!Object} A menu option, containing text, enabled, and a callback. * @package */ function blockCommentOption(block: Blockly.BlockSvg): Object; /** * Make a context menu option for deleting the current workspace comment. * @param {!Blockly.WorkspaceCommentSvg} comment The workspace comment where the * right-click originated. * @return {!Object} A menu option, containing text, enabled, and a callback. * @package */ function commentDeleteOption(comment: Blockly.WorkspaceCommentSvg): Object; /** * Make a context menu option for duplicating the current workspace comment. * @param {!Blockly.WorkspaceCommentSvg} comment The workspace comment where the * right-click originated. * @return {!Object} A menu option, containing text, enabled, and a callback. 
* @package */ function commentDuplicateOption(comment: Blockly.WorkspaceCommentSvg): Object; /** * Make a context menu option for adding a comment on the workspace. * @param {!Blockly.WorkspaceSvg} ws The workspace where the right-click * originated. * @param {!Event} e The right-click mouse event. * @return {!Object} A menu option, containing text, enabled, and a callback. * @package */ function workspaceCommentOption(ws: Blockly.WorkspaceSvg, e: Event): Object; } declare module Blockly.Css { /** * Add some CSS to the blob that will be injected later. Allows optional * components such as fields and the toolbox to store separate CSS. * The provided array of CSS will be destroyed by this function. * @param {!Array.<string>} cssArray Array of CSS strings. */ function register(cssArray: string[]): void; /** * Inject the CSS into the DOM. This is preferable over using a regular CSS * file since: * a) It loads synchronously and doesn't force a redraw later. * b) It speeds up loading by not blocking on a separate HTTP transfer. * c) The CSS content may be made dynamic depending on init options. * @param {boolean} hasCss If false, don't inject CSS * (providing CSS becomes the document's responsibility). * @param {string} pathToMedia Path from page to the Blockly media directory. */ function inject(hasCss: boolean, pathToMedia: string): void; /** * Set the cursor to be displayed when over something draggable. * See https://github.com/google/blockly/issues/981 for context. * @param {*} _cursor Enum. * @deprecated April 2017. */ function setCursor(_cursor: any): void; /** * Array making up the CSS content for Blockly. */ var CONTENT: any /*missing*/; } declare module Blockly { class DropDownDiv extends DropDownDiv__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class DropDownDiv__Class { /** * Class for drop-down div. * @constructor */ constructor(); } } declare module Blockly.DropDownDiv { /** * Arrow size in px. Should match the value in CSS (need to position pre-render). * @type {number} * @const */ var ARROW_SIZE: number; /** * Drop-down border size in px. Should match the value in CSS (need to position the arrow). * @type {number} * @const */ var BORDER_SIZE: number; /** * Amount the arrow must be kept away from the edges of the main drop-down div, in px. * @type {number} * @const */ var ARROW_HORIZONTAL_PADDING: number; /** * Amount drop-downs should be padded away from the source, in px. * @type {number} * @const */ var PADDING_Y: number; /** * Length of animations in seconds. * @type {number} * @const */ var ANIMATION_TIME: number; /** * The default dropdown div border color. * @type {string} * @const */ var DEFAULT_DROPDOWN_BORDER_COLOR: string; /** * The default dropdown div color. * @type {string} * @const */ var DEFAULT_DROPDOWN_COLOR: string; /** * Timer for animation out, to be cleared if we need to immediately hide * without disrupting new shows. * @type {?number} */ var animateOutTimer_: number; /** * Callback for when the drop-down is hidden. * @type {?Function} */ var onHide_: Function; /** * Create and insert the DOM element for this div. */ function createDom(): void; /** * Set an element to maintain bounds within. Drop-downs will appear * within the box of this element if possible. * @param {Element} boundsElement Element to bind drop-down to. */ function setBoundsElement(boundsElement: Element): void; /** * Provide the div for inserting content into the drop-down.
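*
* Rough sketch of how a custom field might use this together with the other
* helpers in this module; the widget element, colours and field variable are
* assumptions for illustration.
* @example
* var contentDiv = Blockly.DropDownDiv.getContentDiv();
* contentDiv.appendChild(myWidgetElement);
* Blockly.DropDownDiv.setColour('#ffffff', '#dddddd');
* Blockly.DropDownDiv.showPositionedByField(field, function() {
*   // Called when the drop-down is hidden; clean up here.
* });
*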
* @return {Element} Div to populate with content */ function getContentDiv(): Element; /** * Clear the content of the drop-down. */ function clearContent(): void; /** * Set the colour for the drop-down. * @param {string} backgroundColour Any CSS colour for the background. * @param {string} borderColour Any CSS colour for the border. */ function setColour(backgroundColour: string, borderColour: string): void; /** * Set the category for the drop-down. * @param {string} category The new category for the drop-down. */ function setCategory(category: string): void; /** * Shortcut to show and place the drop-down with positioning determined * by a particular block. The primary position will be below the block, * and the secondary position above the block. Drop-down will be * constrained to the block's workspace. * @param {!Blockly.Field} field The field showing the drop-down. * @param {!Blockly.Block} block Block to position the drop-down around. * @param {Function=} opt_onHide Optional callback for when the drop-down is * hidden. * @param {number=} opt_secondaryYOffset Optional Y offset for above-block * positioning. * @return {boolean} True if the menu rendered below block; false if above. */ function showPositionedByBlock(field: Blockly.Field, block: Blockly.Block, opt_onHide?: Function, opt_secondaryYOffset?: number): boolean; /** * Shortcut to show and place the drop-down with positioning determined * by a particular field. The primary position will be below the field, * and the secondary position above the field. Drop-down will be * constrained to the block's workspace. * @param {!Blockly.Field} field The field to position the dropdown against. * @param {Function=} opt_onHide Optional callback for when the drop-down is * hidden. * @param {number=} opt_secondaryYOffset Optional Y offset for above-block * positioning. * @return {boolean} True if the menu rendered below block; false if above. */ function showPositionedByField(field: Blockly.Field, opt_onHide?: Function, opt_secondaryYOffset?: number): boolean; /** * Show and place the drop-down. * The drop-down is placed with an absolute "origin point" (x, y) - i.e., * the arrow will point at this origin and the box will be positioned below or above it. * If we can maintain the container bounds at the primary point, the arrow will * point there, and the container will be positioned below it. * If we can't maintain the container bounds at the primary point, fall back to the * secondary point and position above. * @param {Object} owner The object showing the drop-down * @param {boolean} rtl Right-to-left (true) or left-to-right (false). * @param {number} primaryX Desired origin point x, in absolute px * @param {number} primaryY Desired origin point y, in absolute px * @param {number} secondaryX Secondary/alternative origin point x, in absolute px * @param {number} secondaryY Secondary/alternative origin point y, in absolute px * @param {Function=} opt_onHide Optional callback for when the drop-down is hidden * @return {boolean} True if the menu rendered at the primary origin point. * @package */ function show(owner: Object, rtl: boolean, primaryX: number, primaryY: number, secondaryX: number, secondaryY: number, opt_onHide?: Function): boolean; /** * Helper to position the drop-down and the arrow, maintaining bounds. * See explanation of origin points in Blockly.DropDownDiv.show. * @param {number} primaryX Desired origin point x, in absolute px. * @param {number} primaryY Desired origin point y, in absolute px.
* @param {number} secondaryX Secondary/alternative origin point x, * in absolute px. * @param {number} secondaryY Secondary/alternative origin point y, * in absolute px. * @return {Object} Various final metrics, including rendered positions * for drop-down and arrow. */ function getPositionMetrics(primaryX: number, primaryY: number, secondaryX: number, secondaryY: number): Object; /** * Get the metrics for positioning the div below the source. * @param {number} primaryX Desired origin point x, in absolute px. * @param {number} primaryY Desired origin point y, in absolute px. * @param {!Object} boundsInfo An object containing size information about the * bounding element (bounding box and width/height). * @param {!Object} divSize An object containing information about the size * of the DropDownDiv (width & height). * @return {Object} Various final metrics, including rendered positions * for drop-down and arrow. */ function getPositionBelowMetrics(primaryX: number, primaryY: number, boundsInfo: Object, divSize: Object): Object; /** * Get the metrics for positioning the div above the source. * @param {number} secondaryX Secondary/alternative origin point x, * in absolute px. * @param {number} secondaryY Secondary/alternative origin point y, * in absolute px. * @param {!Object} boundsInfo An object containing size information about the * bounding element (bounding box and width/height). * @param {!Object} divSize An object containing information about the size * of the DropDownDiv (width & height). * @return {Object} Various final metrics, including rendered positions * for drop-down and arrow. */ function getPositionAboveMetrics(secondaryX: number, secondaryY: number, boundsInfo: Object, divSize: Object): Object; /** * Get the metrics for positioning the div at the top of the page. * @param {number} sourceX Desired origin point x, in absolute px. * @param {!Object} boundsInfo An object containing size information about the * bounding element (bounding box and width/height). * @param {!Object} divSize An object containing information about the size * of the DropDownDiv (width & height). * @return {Object} Various final metrics, including rendered positions * for drop-down and arrow. */ function getPositionTopOfPageMetrics(sourceX: number, boundsInfo: Object, divSize: Object): Object; /** * Get the x positions for the left side of the DropDownDiv and the arrow, * accounting for the bounds of the workspace. * @param {number} sourceX Desired origin point x, in absolute px. * @param {number} boundsLeft The left edge of the bounding element, in * absolute px. * @param {number} boundsRight The right edge of the bounding element, in * absolute px. * @param {number} divWidth The width of the div in px. * @return {{divX: number, arrowX: number}} An object containing metrics for * the x positions of the left side of the DropDownDiv and the arrow. */ function getPositionX(sourceX: number, boundsLeft: number, boundsRight: number, divWidth: number): { divX: number; arrowX: number }; /** * Is the container visible? * @return {boolean} True if visible. */ function isVisible(): boolean; /** * Hide the menu only if it is owned by the provided object. * @param {Object} owner Object which must be owning the drop-down to hide. * @param {boolean=} opt_withoutAnimation True if we should hide the dropdown * without animating. * @return {boolean} True if hidden. */ function hideIfOwner(owner: Object, opt_withoutAnimation?: boolean): boolean; /** * Hide the menu, triggering animation. 
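     *
     * Hedged usage sketch: a field that owns the drop-down usually closes it via
     * `hideIfOwner`, or unconditionally via `hideWithoutAnimation` (the `field`
     * variable is an assumption here).
     *
     *     Blockly.DropDownDiv.hideIfOwner(field);   // hide only if `field` owns it
     *     // or, to close it regardless of owner and without animating:
     *     Blockly.DropDownDiv.hideWithoutAnimation();
     *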
*/ function hide(): void; /** * Hide the menu, without animation. */ function hideWithoutAnimation(): void; /** * Repositions the dropdownDiv on window resize. If it doesn't know how to * calculate the new position, it will just hide it instead. */ function repositionForWindowResize(): void; } declare module Blockly.Events { /** * Sets whether the next event should be added to the undo stack. * @type {boolean} */ var recordUndo: boolean; /** * Name of event that creates a block. Will be deprecated for BLOCK_CREATE. * @const */ var CREATE: any /*missing*/; /** * Name of event that creates a block. * @const */ var BLOCK_CREATE: any /*missing*/; /** * Name of event that deletes a block. Will be deprecated for BLOCK_DELETE. * @const */ var DELETE: any /*missing*/; /** * Name of event that deletes a block. * @const */ var BLOCK_DELETE: any /*missing*/; /** * Name of event that changes a block. Will be deprecated for BLOCK_CHANGE. * @const */ var CHANGE: any /*missing*/; /** * Name of event that changes a block. * @const */ var BLOCK_CHANGE: any /*missing*/; /** * Name of event that moves a block. Will be deprecated for BLOCK_MOVE. * @const */ var MOVE: any /*missing*/; /** * Name of event that moves a block. * @const */ var BLOCK_MOVE: any /*missing*/; /** * Name of event that creates a variable. * @const */ var VAR_CREATE: any /*missing*/; /** * Name of event that deletes a variable. * @const */ var VAR_DELETE: any /*missing*/; /** * Name of event that renames a variable. * @const */ var VAR_RENAME: any /*missing*/; /** * Name of event that records a UI change. * @const */ var UI: any /*missing*/; /** * Name of event that creates a comment. * @const */ var COMMENT_CREATE: any /*missing*/; /** * Name of event that deletes a comment. * @const */ var COMMENT_DELETE: any /*missing*/; /** * Name of event that changes a comment. * @const */ var COMMENT_CHANGE: any /*missing*/; /** * Name of event that moves a comment. * @const */ var COMMENT_MOVE: any /*missing*/; /** * Name of event that records a workspace load. */ var FINISHED_LOADING: any /*missing*/; /** * List of events that cause objects to be bumped back into the visible * portion of the workspace (only used for non-movable workspaces). * * Not to be confused with bumping so that disconnected connections to do * not appear connected. * @const */ var BUMP_EVENTS: any /*missing*/; /** * Create a custom event and fire it. * @param {!Blockly.Events.Abstract} event Custom data for event. */ function fire(event: Blockly.Events.Abstract): void; /** * Filter the queued events and merge duplicates. * @param {!Array.<!Blockly.Events.Abstract>} queueIn Array of events. * @param {boolean} forward True if forward (redo), false if backward (undo). * @return {!Array.<!Blockly.Events.Abstract>} Array of filtered events. */ function filter(queueIn: Blockly.Events.Abstract[], forward: boolean): Blockly.Events.Abstract[]; /** * Modify pending undo events so that when they are fired they don't land * in the undo stack. Called by Blockly.Workspace.clearUndo. */ function clearPendingUndo(): void; /** * Stop sending events. Every call to this function MUST also call enable. */ function disable(): void; /** * Start sending events. Unless events were already disabled when the * corresponding call to disable was made. */ function enable(): void; /** * Returns whether events may be fired or not. * @return {boolean} True if enabled. */ function isEnabled(): boolean; /** * Current group. * @return {string} ID string. 
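     *
     * Sketch of the usual grouping pattern around getGroup/setGroup (the edits in
     * the body are assumed):
     *
     *     Blockly.Events.setGroup(true);
     *     try {
     *       // ... make several related changes that should undo together ...
     *     } finally {
     *       Blockly.Events.setGroup(false);
     *     }
     *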
*/ function getGroup(): string; /** * Start or stop a group. * @param {boolean|string} state True to start new group, false to end group. * String to set group explicitly. */ function setGroup(state: boolean|string): void; /** * Compute a list of the IDs of the specified block and all its descendants. * @param {!Blockly.Block} block The root block. * @return {!Array.<string>} List of block IDs. * @package */ function getDescendantIds(block: Blockly.Block): string[]; /** * Decode the JSON into an event. * @param {!Object} json JSON representation. * @param {!Blockly.Workspace} workspace Target workspace for event. * @return {!Blockly.Events.Abstract} The event represented by the JSON. */ function fromJson(json: Object, workspace: Blockly.Workspace): Blockly.Events.Abstract; /** * Enable/disable a block depending on whether it is properly connected. * Use this on applications where all blocks should be connected to a top block. * Recommend setting the 'disable' option to 'false' in the config so that * users don't try to re-enable disabled orphan blocks. * @param {!Blockly.Events.Abstract} event Custom data for event. */ function disableOrphans(event: Blockly.Events.Abstract): void; } declare module Blockly.Events { class Abstract extends Abstract__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Abstract__Class { /** * Abstract class for an event. * @constructor */ constructor(); /** * The workspace identifier for this event. * @type {string|undefined} */ workspaceId: string|any /*undefined*/; /** * The event group id for the group this event belongs to. Groups define * events that should be treated as an single action from the user's * perspective, and should be undone together. * @type {string} */ group: string; /** * Sets whether the event should be added to the undo stack. * @type {boolean} */ recordUndo: boolean; /** * Encode the event as JSON. * @return {!Object} JSON representation. */ toJson(): Object; /** * Decode the JSON event. * @param {!Object} json JSON representation. */ fromJson(json: Object): void; /** * Does this event record any change of state? * @return {boolean} True if null, false if something changed. */ isNull(): boolean; /** * Run an event. * @param {boolean} _forward True if run forward, false if run backward (undo). */ run(_forward: boolean): void; /** * Get workspace the event belongs to. * @return {!Blockly.Workspace} The workspace the event belongs to. * @throws {Error} if workspace is null. * @protected */ getEventWorkspace_(): Blockly.Workspace; } } declare module Blockly.Extensions { /** * Registers a new extension function. Extensions are functions that help * initialize blocks, usually adding dynamic behavior such as onchange * handlers and mutators. These are applied using Block.applyExtension(), or * the JSON "extensions" array attribute. * @param {string} name The name of this extension. * @param {Function} initFn The function to initialize an extended block. * @throws {Error} if the extension name is empty, the extension is already * registered, or extensionFn is not a function. */ function register(name: string, initFn: Function): void; /** * Registers a new extension function that adds all key/value of mixinObj. * @param {string} name The name of this extension. * @param {!Object} mixinObj The values to mix in. * @throws {Error} if the extension name is empty or the extension is already * registered. 
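     *
     * Illustrative sketch; the extension name and the mixed-in method are
     * assumptions, not part of the generated API surface:
     *
     *     Blockly.Extensions.registerMixin('my_mixin', {
     *       describe: function() {
     *         // `this` is the block the mixin was applied to
     *         return 'block of type ' + this.type;
     *       }
     *     });
     *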
*/ function registerMixin(name: string, mixinObj: Object): void; /** * Registers a new extension function that adds a mutator to the block. * At register time this performs some basic sanity checks on the mutator. * The wrapper may also add a mutator dialog to the block, if both compose and * decompose are defined on the mixin. * @param {string} name The name of this mutator extension. * @param {!Object} mixinObj The values to mix in. * @param {(function())=} opt_helperFn An optional function to apply after * mixing in the object. * @param {Array.<string>=} opt_blockList A list of blocks to appear in the * flyout of the mutator dialog. * @throws {Error} if the mutation is invalid or can't be applied to the block. */ function registerMutator(name: string, mixinObj: Object, opt_helperFn?: { (): any /*missing*/ }, opt_blockList?: string[]): void; /** * Unregisters the extension registered with the given name. * @param {string} name The name of the extension to unregister. */ function unregister(name: string): void; /** * Applies an extension method to a block. This should only be called during * block construction. * @param {string} name The name of the extension. * @param {!Blockly.Block} block The block to apply the named extension to. * @param {boolean} isMutator True if this extension defines a mutator. * @throws {Error} if the extension is not found. */ function apply(name: string, block: Blockly.Block, isMutator: boolean): void; /** * Builds an extension function that will map a dropdown value to a tooltip * string. * * This method includes multiple checks to ensure tooltips, dropdown options, * and message references are aligned. This aims to catch errors as early as * possible, without requiring developers to manually test tooltips under each * option. After the page is loaded, each tooltip text string will be checked * for matching message keys in the internationalized string table. Deferring * this until the page is loaded decouples loading dependencies. Later, upon * loading the first block of any given type, the extension will validate every * dropdown option has a matching tooltip in the lookupTable. Errors are * reported as warnings in the console, and are never fatal. * @param {string} dropdownName The name of the field whose value is the key * to the lookup table. * @param {!Object.<string, string>} lookupTable The table of field values to * tooltip text. * @return {Function} The extension function. */ function buildTooltipForDropdown(dropdownName: string, lookupTable: { [key: string]: string }): Function; /** * Builds an extension function that will install a dynamic tooltip. The * tooltip message should include the string '%1' and that string will be * replaced with the text of the named field. * @param {string} msgTemplate The template form to of the message text, with * %1 placeholder. * @param {string} fieldName The field with the replacement text. * @return {Function} The extension function. */ function buildTooltipWithFieldText(msgTemplate: string, fieldName: string): Function; } declare module Blockly { class Field extends Field__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Field__Class { /** * Abstract class for an editable field. * @param {*} value The initial value of the field. * @param {?Function=} opt_validator A function that is called to validate * changes to the field's value. Takes in a value & returns a validated * value, or null to abort the change. 
* @param {Object=} opt_config A map of options used to configure the field. See * the individual field's documentation for a list of properties this * parameter supports. * @constructor */ constructor(value: any, opt_validator?: Function, opt_config?: Object); /** * A generic value possessed by the field. * Should generally be non-null, only null when the field is created. * @type {*} * @protected */ value_: any; /** * Validation function called when user edits an editable field. * @type {Function} * @protected */ validator_: Function; /** * The size of the area rendered by the field. * @type {!Blockly.utils.Size} * @protected */ size_: Blockly.utils.Size; /** * Name of field. Unique within each block. * Static labels are usually unnamed. * @type {string|undefined} */ name: string|any /*undefined*/; /** * Has this field been disposed of? * @type {boolean} * @package */ disposed: boolean; /** * Maximum characters of text to display before adding an ellipsis. * @type {number} */ maxDisplayLength: number; /** * Block this field is attached to. Starts as null, then set in init. * @type {Blockly.Block} * @protected */ sourceBlock_: Blockly.Block; /** * Does this block need to be re-rendered? * @type {boolean} * @protected */ isDirty_: boolean; /** * Is the field visible, or hidden due to the block being collapsed? * @type {boolean} * @protected */ visible_: boolean; /** * A developer hook to override the returned text of this field. * Override if the text representation of the value of this field * is not just a string cast of its value. * @return {?string} Current text. Return null to resort to a string cast. * @protected */ getText_(): string; /** * Editable fields usually show some sort of UI indicating they are editable. * They will also be saved by the XML renderer. * @type {boolean} */ EDITABLE: boolean; /** * Serializable fields are saved by the XML renderer, non-serializable fields * are not. Editable fields should also be serializable. This is not the * case by default so that SERIALIZABLE is backwards compatible. * @type {boolean} */ SERIALIZABLE: boolean; /** * Process the configuration map passed to the field. * @param {!Object} config A map of options used to configure the field. See * the individual field's documentation for a list of properties this * parameter supports. * @protected */ configure_(config: Object): void; /** * Attach this field to a block. * @param {!Blockly.Block} block The block containing this field. */ setSourceBlock(block: Blockly.Block): void; /** * Get the block this field is attached to. * @return {Blockly.Block} The block containing this field. */ getSourceBlock(): Blockly.Block; /** * Initialize everything to render this field. Override * methods initModel and initView rather than this method. * @package */ init(): void; /** * Create the block UI for this field. * @package */ initView(): void; /** * Initializes the model of the field after it has been installed on a block. * No-op by default. * @package */ initModel(): void; /** * Create a field border rect element. Not to be overridden by subclasses. * Instead modify the result of the function inside initView, or create a * separate function to call. * @protected */ createBorderRect_(): void; /** * Create a field text element. Not to be overridden by subclasses. Instead * modify the result of the function inside initView, or create a separate * function to call. * @protected */ createTextElement_(): void; /** * Bind events to the field. 
Can be overridden by subclasses if they need to do * custom input handling. * @protected */ bindEvents_(): void; /** * Sets the field's value based on the given XML element. Should only be * called by Blockly.Xml. * @param {!Element} fieldElement The element containing info about the * field's state. * @package */ fromXml(fieldElement: Element): void; /** * Serializes this field's value to XML. Should only be called by Blockly.Xml. * @param {!Element} fieldElement The element to populate with info about the * field's state. * @return {!Element} The element containing info about the field's state. * @package */ toXml(fieldElement: Element): Element; /** * Dispose of all DOM objects and events belonging to this editable field. * @package */ dispose(): void; /** * Add or remove the UI indicating if this field is editable or not. */ updateEditable(): void; /** * Check whether this field defines the showEditor_ function. * @return {boolean} Whether this field is clickable. */ isClickable(): boolean; /** * Check whether this field is currently editable. Some fields are never * EDITABLE (e.g. text labels). Other fields may be EDITABLE but may exist on * non-editable blocks. * @return {boolean} Whether this field is editable and on an editable block */ isCurrentlyEditable(): boolean; /** * Check whether this field should be serialized by the XML renderer. * Handles the logic for backwards compatibility and incongruous states. * @return {boolean} Whether this field should be serialized or not. */ isSerializable(): boolean; /** * Gets whether this editable field is visible or not. * @return {boolean} True if visible. */ isVisible(): boolean; /** * Sets whether this editable field is visible or not. Should only be called * by input.setVisible. * @param {boolean} visible True if visible. * @package */ setVisible(visible: boolean): void; /** * Sets a new validation function for editable fields, or clears a previously * set validator. * * The validator function takes in the new field value, and returns * validated value. The validated value could be the input value, a modified * version of the input value, or null to abort the change. * * If the function does not return anything (or returns undefined) the new * value is accepted as valid. This is to allow for fields using the * validated function as a field-level change event notification. * * @param {Function} handler The validator function * or null to clear a previous validator. */ setValidator(handler: Function): void; /** * Gets the validation function for editable fields, or null if not set. * @return {Function} Validation function, or null. */ getValidator(): Function; /** * Validates a change. Does nothing. Subclasses may override this. * @param {string} text The user's text. * @return {string} No change needed. * @deprecated May 2019. Override doClassValidation and other relevant 'do' * functions instead. */ classValidator(text: string): string; /** * Calls the validation function for this field, as well as all the validation * function for the field's class and its parents. * @param {string} text Proposed text. * @return {?string} Revised text, or null if invalid. * @deprecated May 2019. setValue now contains all relevant logic. */ callValidator(text: string): string; /** * Gets the group element for this editable field. * Used for measuring the size and for positioning. * @return {!SVGElement} The group element. */ getSvgRoot(): SVGElement; /** * Updates the field to match the colour/style of the block. 
Should only be * called by BlockSvg.updateColour(). * @package */ updateColour(): void; /** * Used by getSize() to move/resize any DOM elements, and get the new size. * * All rendering that has an effect on the size/shape of the block should be * done here, and should be triggered by getSize(). * @protected */ render_(): void; /** * Updates the width of the field. Redirects to updateSize_(). * @deprecated May 2019 Use Blockly.Field.updateSize_() to force an update * to the size of the field, or Blockly.utils.dom.getTextWidth() to * check the size of the field. */ updateWidth(): void; /** * Updates the size of the field based on the text. * @protected */ updateSize_(): void; /** * Returns the height and width of the field. * * This should *in general* be the only place render_ gets called from. * @return {!Blockly.utils.Size} Height and width. */ getSize(): Blockly.utils.Size; /** * Returns the bounding box of the rendered field, accounting for workspace * scaling. * @return {!Object} An object with top, bottom, left, and right in pixels * relative to the top left corner of the page (window coordinates). * @protected */ getScaledBBox_(): Object; /** * Get the text from this field to display on the block. May differ from * ``getText`` due to ellipsis, and other formatting. * @return {string} Text to display. * @protected */ getDisplayText_(): string; /** * Get the text from this field. * @return {string} Current text. */ getText(): string; /** * Set the text in this field. Trigger a rerender of the source block. * @param {*} _newText New text. * @deprecated 2019 setText should not be used directly. Use setValue instead. */ setText(_newText: any): void; /** * Force a rerender of the block that this field is installed on, which will * rerender this field and adjust for any sizing changes. * Other fields on the same block will not rerender, because their sizes have * already been recorded. * @package */ markDirty(): void; /** * Force a rerender of the block that this field is installed on, which will * rerender this field and adjust for any sizing changes. * Other fields on the same block will not rerender, because their sizes have * already been recorded. * @package */ forceRerender(): void; /** * Used to change the value of the field. Handles validation and events. * Subclasses should override doClassValidation_ and doValueUpdate_ rather * than this method. * @param {*} newValue New value. */ setValue(newValue: any): void; /** * Get the current value of the field. * @return {*} Current value. */ getValue(): any; /** * Used to validate a value. Returns input by default. Can be overridden by * subclasses, see FieldDropdown. * @param {*=} opt_newValue The value to be validated. * @return {*} The validated value, same as input by default. * @protected * @suppress {deprecated} Suppress deprecated this.classValidator call. */ doClassValidation_(opt_newValue?: any): any; /** * Used to update the value of a field. Can be overridden by subclasses to do * custom storage of values/updating of external things. * @param {*} newValue The value to be saved. * @protected */ doValueUpdate_(newValue: any): void; /** * Used to notify the field an invalid value was input. Can be overidden by * subclasses, see FieldTextInput. * No-op by default. * @param {*} _invalidValue The input value that was determined to be invalid. * @protected */ doValueInvalid_(_invalidValue: any): void; /** * Handle a mouse down event on a field. * @param {!Event} e Mouse down event. 
* @protected */ onMouseDown_(e: Event): void; /** * Change the tooltip text for this field. * @param {string|Function|!SVGElement} newTip Text for tooltip or a parent * element to link to for its tooltip. */ setTooltip(newTip: string|Function|SVGElement): void; /** * Whether this field references any Blockly variables. If true it may need to * be handled differently during serialization and deserialization. Subclasses * may override this. * @return {boolean} True if this field has any variable references. * @package */ referencesVariables(): boolean; /** * Search through the list of inputs and their fields in order to find the * parent input of a field. * @return {Blockly.Input} The input that the field belongs to. * @package */ getParentInput(): Blockly.Input; /** * Returns whether or not we should flip the field in RTL. * @return {boolean} True if we should flip in RTL. */ getFlipRtl(): boolean; /** * Returns whether or not the field is tab navigable. * @return {boolean} True if the field is tab navigable. */ isTabNavigable(): boolean; /** * Handles the given action. * This is only triggered when keyboard accessibility mode is enabled. * @param {!Blockly.Action} _action The action to be handled. * @return {boolean} True if the field handled the action, false otherwise. * @package */ onBlocklyAction(_action: Blockly.Action): boolean; /** * Add the cursor svg to this fields svg group. * @param {SVGElement} cursorSvg The svg root of the cursor to be added to the * field group. * @package */ setCursorSvg(cursorSvg: SVGElement): void; /** * Add the marker svg to this fields svg group. * @param {SVGElement} markerSvg The svg root of the marker to be added to the * field group. * @package */ setMarkerSvg(markerSvg: SVGElement): void; } } declare module Blockly.Field { /** * The default height of the border rect on any field. * @type {number} * @package */ var BORDER_RECT_DEFAULT_HEIGHT: number; /** * The default height of the text element on any field. * @type {number} * @package */ var TEXT_DEFAULT_HEIGHT: number; /** * The padding added to the width by the border rect, if it exists. * @type {number} * @package */ var X_PADDING: number; /** * The padding added to the height by the border rect, if it exists. * @type {number} * @package */ var Y_PADDING: number; /** * The default offset between the left of the text element and the left of the * border rect, if the border rect exists. * @type {number} */ var DEFAULT_TEXT_OFFSET: number; /** * Non-breaking space. * @const */ var NBSP: any /*missing*/; } declare module Blockly { class FieldAngle extends FieldAngle__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class FieldAngle__Class extends Blockly.FieldTextInput__Class { /** * Class for an editable angle field. * @param {string|number=} opt_value The initial value of the field. Should cast * to a number. Defaults to 0. * @param {Function=} opt_validator A function that is called to validate * changes to the field's value. Takes in a number & returns a * validated number, or null to abort the change. * @param {Object=} opt_config A map of options used to configure the field. * See the [field creation documentation]{@link https://developers.google.com/blockly/guides/create-custom-blocks/fields/built-in-fields/angle#creation} * for a list of properties this parameter supports. 
* @extends {Blockly.FieldTextInput} * @constructor */ constructor(opt_value?: string|number, opt_validator?: Function, opt_config?: Object); /** * Serializable fields are saved by the XML renderer, non-serializable fields * are not. Editable fields should also be serializable. * @type {boolean} */ SERIALIZABLE: boolean; /** * Create the block UI for this field. * @package */ initView(): void; /** * Set the angle to match the mouse's position. * @param {!Event} e Mouse move event. */ onMouseMove(e: Event): void; } } declare module Blockly.FieldAngle { /** * Construct a FieldAngle from a JSON arg object. * @param {!Object} options A JSON object with options (angle). * @return {!Blockly.FieldAngle} The new field instance. * @package * @nocollapse */ function fromJson(options: Object): Blockly.FieldAngle; /** * The default amount to round angles to when using a mouse or keyboard nav * input. Must be a positive integer to support keyboard navigation. * @const {number} */ var ROUND: any /*missing*/; /** * Half the width of protractor image. * @const {number} */ var HALF: any /*missing*/; /** * Default property describing which direction makes an angle field's value * increase. Angle increases clockwise (true) or counterclockwise (false). * @const {boolean} */ var CLOCKWISE: any /*missing*/; /** * The default offset of 0 degrees (and all angles). Always offsets in the * counterclockwise direction, regardless of the field's clockwise property. * Usually either 0 (0 = right) or 90 (0 = up). * @const {number} */ var OFFSET: any /*missing*/; /** * The default maximum angle to allow before wrapping. * Usually either 360 (for 0 to 359.9) or 180 (for -179.9 to 180). * @const {number} */ var WRAP: any /*missing*/; /** * Radius of protractor circle. Slightly smaller than protractor size since * otherwise SVG crops off half the border at the edges. * @const {number} */ var RADIUS: any /*missing*/; } declare module Blockly { class FieldCheckbox extends FieldCheckbox__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class FieldCheckbox__Class extends Blockly.Field__Class { /** * Class for a checkbox field. * @param {string|boolean=} opt_value The initial value of the field. Should * either be 'TRUE', 'FALSE' or a boolean. Defaults to 'FALSE'. * @param {Function=} opt_validator A function that is called to validate * changes to the field's value. Takes in a value ('TRUE' or 'FALSE') & * returns a validated value ('TRUE' or 'FALSE'), or null to abort the * change. * @param {Object=} opt_config A map of options used to configure the field. * See the [field creation documentation]{@link https://developers.google.com/blockly/guides/create-custom-blocks/fields/built-in-fields/checkbox#creation} * for a list of properties this parameter supports. * @extends {Blockly.Field} * @constructor */ constructor(opt_value?: string|boolean, opt_validator?: Function, opt_config?: Object); /** * Serializable fields are saved by the XML renderer, non-serializable fields * are not. Editable fields should also be serializable. * @type {boolean} */ SERIALIZABLE: boolean; /** * Mouse cursor style when over the hotspot that initiates editability. */ CURSOR: any /*missing*/; /** * Used to tell if the field needs to be rendered the next time the block is * rendered. Checkbox fields are statically sized, and only need to be * rendered at initialization. * @type {boolean} * @protected */ isDirty_: boolean; /** * Create the block UI for this checkbox. 
* @package */ initView(): void; /** * Set the character used for the check mark. * @param {?string} character The character to use for the check mark, or * null to use the default. */ setCheckCharacter(character: string): void; /** * Toggle the state of the checkbox on click. * @protected */ showEditor_(): void; /** * Ensure that the input value is valid ('TRUE' or 'FALSE'). * @param {*=} opt_newValue The input value. * @return {?string} A valid value ('TRUE' or 'FALSE), or null if invalid. * @protected */ doClassValidation_(opt_newValue?: any): string; /** * Update the value of the field, and update the checkElement. * @param {*} newValue The value to be saved. The default validator guarantees * that this is a either 'TRUE' or 'FALSE'. * @protected */ doValueUpdate_(newValue: any): void; /** * Get the value of this field, either 'TRUE' or 'FALSE'. * @return {string} The value of this field. */ getValue(): string; /** * Get the boolean value of this field. * @return {boolean} The boolean value of this field. */ getValueBoolean(): boolean; /** * Get the text of this field. Used when the block is collapsed. * @return {string} Text representing the value of this field * ('true' or 'false'). */ getText(): string; } } declare module Blockly.FieldCheckbox { /** * Construct a FieldCheckbox from a JSON arg object. * @param {!Object} options A JSON object with options (checked). * @return {!Blockly.FieldCheckbox} The new field instance. * @package * @nocollapse */ function fromJson(options: Object): Blockly.FieldCheckbox; /** * The width of a checkbox field. * @type {number} * @const */ var WIDTH: number; /** * Default character for the checkmark. * @type {string} * @const */ var CHECK_CHAR: string; /** * Used to correctly position the check mark. * @type {number} * @const */ var CHECK_X_OFFSET: number; /** * Used to correctly position the check mark. * @type {number} * @const */ var CHECK_Y_OFFSET: number; } declare module Blockly { class FieldColour extends FieldColour__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class FieldColour__Class extends Blockly.Field__Class { /** * Class for a colour input field. * @param {string=} opt_value The initial value of the field. Should be in * '#rrggbb' format. Defaults to the first value in the default colour array. * @param {Function=} opt_validator A function that is called to validate * changes to the field's value. Takes in a colour string & returns a * validated colour string ('#rrggbb' format), or null to abort the change. * @param {Object=} opt_config A map of options used to configure the field. * See the [field creation documentation]{@link https://developers.google.com/blockly/guides/create-custom-blocks/fields/built-in-fields/colour} * for a list of properties this parameter supports. * @extends {Blockly.Field} * @constructor */ constructor(opt_value?: string, opt_validator?: Function, opt_config?: Object); /** * Serializable fields are saved by the XML renderer, non-serializable fields * are not. Editable fields should also be serializable. * @type {boolean} */ SERIALIZABLE: boolean; /** * Mouse cursor style when over the hotspot that initiates the editor. */ CURSOR: any /*missing*/; /** * Used to tell if the field needs to be rendered the next time the block is * rendered. Colour fields are statically sized, and only need to be * rendered at initialization. * @type {boolean} * @protected */ isDirty_: boolean; /** * Create the block UI for this colour field. 
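     *
     * Palette customization sketch (colours and titles are example values):
     *
     *     const colour = new Blockly.FieldColour('#ff0000');
     *     colour.setColours(['#ff0000', '#00ff00', '#0000ff'], ['red', 'green', 'blue'])
     *         .setColumns(3);
     *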
* @package */ initView(): void; /** * Ensure that the input value is a valid colour. * @param {*=} opt_newValue The input value. * @return {?string} A valid colour, or null if invalid. * @protected */ doClassValidation_(opt_newValue?: any): string; /** * Update the value of this colour field, and update the displayed colour. * @param {*} newValue The value to be saved. The default validator guarantees * that this is a colour in '#rrggbb' format. * @protected */ doValueUpdate_(newValue: any): void; /** * Get the text for this field. Used when the block is collapsed. * @return {string} Text representing the value of this field. */ getText(): string; /** * Set a custom colour grid for this field. * @param {Array.<string>} colours Array of colours for this block, * or null to use default (Blockly.FieldColour.COLOURS). * @param {Array.<string>=} opt_titles Optional array of colour tooltips, * or null to use default (Blockly.FieldColour.TITLES). * @return {!Blockly.FieldColour} Returns itself (for method chaining). */ setColours(colours: string[], opt_titles?: string[]): Blockly.FieldColour; /** * Set a custom grid size for this field. * @param {number} columns Number of columns for this block, * or 0 to use default (Blockly.FieldColour.COLUMNS). * @return {!Blockly.FieldColour} Returns itself (for method chaining). */ setColumns(columns: number): Blockly.FieldColour; /** * Handles the given action. * This is only triggered when keyboard accessibility mode is enabled. * @param {!Blockly.Action} action The action to be handled. * @return {boolean} True if the field handled the action, false otherwise. * @package */ onBlocklyAction(action: Blockly.Action): boolean; } } declare module Blockly.FieldColour { /** * Construct a FieldColour from a JSON arg object. * @param {!Object} options A JSON object with options (colour). * @return {!Blockly.FieldColour} The new field instance. * @package * @nocollapse */ function fromJson(options: Object): Blockly.FieldColour; /** * An array of colour strings for the palette. * Copied from goog.ui.ColorPicker.SIMPLE_GRID_COLORS * All colour pickers use this unless overridden with setColours. * @type {!Array.<string>} */ var COLOURS: string[]; /** * An array of tooltip strings for the palette. If not the same length as * COLOURS, the colour's hex code will be used for any missing titles. * All colour pickers use this unless overridden with setColours. * @type {!Array.<string>} */ var TITLES: string[]; /** * Number of columns in the palette. * All colour pickers use this unless overridden with setColumns. */ var COLUMNS: any /*missing*/; } declare module Blockly { class FieldDate extends FieldDate__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class FieldDate__Class extends Blockly.Field__Class { /** * Class for a date input field. * @param {string=} opt_value The initial value of the field. Should be in * 'YYYY-MM-DD' format. Defaults to the current date. * @param {Function=} opt_validator A function that is called to validate * changes to the field's value. Takes in a date string & returns a * validated date string ('YYYY-MM-DD' format), or null to abort the change. * @extends {Blockly.Field} * @constructor */ constructor(opt_value?: string, opt_validator?: Function); /** * Serializable fields are saved by the XML renderer, non-serializable fields * are not. Editable fields should also be serializable. * @type {boolean} */ SERIALIZABLE: boolean; /** * Mouse cursor style when over the hotspot that initiates the editor. 
*/ CURSOR: any /*missing*/; /** * Ensure that the input value is a valid date. * @param {*=} opt_newValue The input value. * @return {?string} A valid date, or null if invalid. * @protected */ doClassValidation_(opt_newValue?: any): string; /** * Render the field. If the picker is shown make sure it has the current * date selected. * @protected */ render_(): void; /** * Updates the field's colours to match those of the block. * @package */ updateColour(): void; } } declare module Blockly.FieldDate { /** * Construct a FieldDate from a JSON arg object. * @param {!Object} options A JSON object with options (date). * @return {!Blockly.FieldDate} The new field instance. * @package * @nocollapse */ function fromJson(options: Object): Blockly.FieldDate; } declare module goog { /** * Back up original getMsg function. * @type {!Function} */ var getMsgOrig: Function; /** * Gets a localized message. * Overrides the default Closure function to check for a Blockly.Msg first. * Used infrequently, only known case is TODAY button in date picker. * @param {string} str Translatable string, places holders in the form {$foo}. * @param {Object.<string, string>=} opt_values Maps place holder name to value. * @return {string} Message with placeholders filled. * @suppress {duplicate} */ function getMsg(str: string, opt_values?: { [key: string]: string }): string; } declare module goog.getMsg { /** * Mapping of Closure messages to Blockly.Msg names. */ var blocklyMsgMap: any /*missing*/; } declare module Blockly { class FieldDropdown extends FieldDropdown__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class FieldDropdown__Class extends Blockly.Field__Class { /** * Class for an editable dropdown field. * @param {(!Array.<!Array>|!Function)} menuGenerator A non-empty array of * options for a dropdown list, or a function which generates these options. * @param {Function=} opt_validator A function that is called to validate * changes to the field's value. Takes in a language-neutral dropdown * option & returns a validated language-neutral dropdown option, or null to * abort the change. * @param {Object=} opt_config A map of options used to configure the field. * See the [field creation documentation]{@link https://developers.google.com/blockly/guides/create-custom-blocks/fields/built-in-fields/dropdown#creation} * for a list of properties this parameter supports. * @extends {Blockly.Field} * @constructor * @throws {TypeError} If `menuGenerator` options are incorrectly structured. */ constructor(menuGenerator: any[][]|Function, opt_validator?: Function, opt_config?: Object); /** * An array of options for a dropdown list, * or a function which generates these options. * @type {(!Array.<!Array>| * !function(this:Blockly.FieldDropdown): !Array.<!Array>)} * @protected */ menuGenerator_: any[][]|{ (): any[][] }; /** * Serializable fields are saved by the XML renderer, non-serializable fields * are not. Editable fields should also be serializable. * @type {boolean} */ SERIALIZABLE: boolean; /** * Mouse cursor style when over the hotspot that initiates the editor. */ CURSOR: any /*missing*/; /** * Create the block UI for this dropdown. * @package */ initView(): void; /** * Handle the selection of an item in the dropdown menu. * @param {!Blockly.Menu} menu The Menu component clicked. * @param {!Blockly.MenuItem} menuItem The MenuItem selected within menu. 
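     *
     * Options are [human-readable text, language-neutral value] tuples; a static
     * and a dynamic sketch (option contents are assumptions):
     *
     *     new Blockly.FieldDropdown([['first', 'FIRST'], ['second', 'SECOND']]);
     *     new Blockly.FieldDropdown(function() {
     *       return [['generated', 'GEN']];
     *     });
     *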
*/ onItemSelected(menu: Blockly.Menu, menuItem: Blockly.MenuItem): void; /** * @return {boolean} True if the option list is generated by a function. * Otherwise false. */ isOptionListDynamic(): boolean; /** * Return a list of the options for this dropdown. * @param {boolean=} opt_useCache For dynamic options, whether or not to use the * cached options or to re-generate them. * @return {!Array.<!Array>} A non-empty array of option tuples: * (human-readable text or image, language-neutral name). * @throws {TypeError} If generated options are incorrectly structured. */ getOptions(opt_useCache?: boolean): any[][]; /** * Ensure that the input value is a valid language-neutral option. * @param {*=} opt_newValue The input value. * @return {?string} A valid language-neutral option, or null if invalid. * @protected */ doClassValidation_(opt_newValue?: any): string; /** * Update the value of this dropdown field. * @param {*} newValue The value to be saved. The default validator guarantees * that this is one of the valid dropdown options. * @protected */ doValueUpdate_(newValue: any): void; /** * Updates the dropdown arrow to match the colour/style of the block. * @package */ updateColour(): void; /** * Handles the given action. * This is only triggered when keyboard accessibility mode is enabled. * @param {!Blockly.Action} action The action to be handled. * @return {boolean} True if the field handled the action, false otherwise. * @package */ onBlocklyAction(action: Blockly.Action): boolean; } } declare module Blockly.FieldDropdown { /** * Dropdown image properties. * @typedef {{ * src:string, * alt:string, * width:number, * height:number * }} */ interface ImageProperties { src: string; alt: string; width: number; height: number } /** * Construct a FieldDropdown from a JSON arg object. * @param {!Object} options A JSON object with options (options). * @return {!Blockly.FieldDropdown} The new field instance. * @package * @nocollapse */ function fromJson(options: Object): Blockly.FieldDropdown; /** * Horizontal distance that a checkmark overhangs the dropdown. */ var CHECKMARK_OVERHANG: any /*missing*/; /** * Maximum height of the dropdown menu, as a percentage of the viewport height. */ var MAX_MENU_HEIGHT_VH: any /*missing*/; /** * Android can't (in 2014) display "▾", so use "▼" instead. */ var ARROW_CHAR: any /*missing*/; /** * Use the calculated prefix and suffix lengths to trim all of the options in * the given array. * @param {!Array.<!Array>} options Array of option tuples: * (human-readable text or image, language-neutral name). * @param {number} prefixLength The length of the common prefix. * @param {number} suffixLength The length of the common suffix * @return {!Array.<!Array>} A new array with all of the option text trimmed. */ function applyTrim_(options: any[][], prefixLength: number, suffixLength: number): any[][]; } declare module Blockly { class FieldImage extends FieldImage__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class FieldImage__Class extends Blockly.Field__Class { /** * Class for an image on a block. * @param {string} src The URL of the image. Defaults to an empty string. * @param {!(string|number)} width Width of the image. * @param {!(string|number)} height Height of the image. * @param {string=} opt_alt Optional alt text for when block is collapsed. * @param {function(!Blockly.FieldImage)=} opt_onClick Optional function to be * called when the image is clicked. If opt_onClick is defined, opt_alt must * also be defined. 
* @param {boolean=} opt_flipRtl Whether to flip the icon in RTL. * @param {Object=} opt_config A map of options used to configure the field. * See the [field creation documentation]{@link https://developers.google.com/blockly/guides/create-custom-blocks/fields/built-in-fields/image#creation} * for a list of properties this parameter supports. * @extends {Blockly.Field} * @constructor */ constructor(src: string, width: string|number, height: string|number, opt_alt?: string, opt_onClick?: { (_0: Blockly.FieldImage): any /*missing*/ }, opt_flipRtl?: boolean, opt_config?: Object); /** * Editable fields usually show some sort of UI indicating they are * editable. This field should not. * @type {boolean} */ EDITABLE: boolean; /** * Used to tell if the field needs to be rendered the next time the block is * rendered. Image fields are statically sized, and only need to be * rendered at initialization. * @type {boolean} * @protected */ isDirty_: boolean; /** * Create the block UI for this image. * @package */ initView(): void; /** * Ensure that the input value (the source URL) is a string. * @param {*=} opt_newValue The input value. * @return {?string} A string, or null if invalid. * @protected */ doClassValidation_(opt_newValue?: any): string; /** * Update the value of this image field, and update the displayed image. * @param {*} newValue The value to be saved. The default validator guarantees * that this is a string. * @protected */ doValueUpdate_(newValue: any): void; /** * Set the alt text of this image. * @param {?string} alt New alt text. * @public */ setAlt(alt: string): void; /** * If field click is called, and click handler defined, * call the handler. */ showEditor_(): void; /** * Set the function that is called when this image is clicked. * @param {?function(!Blockly.FieldImage)} func The function that is called * when the image is clicked, or null to remove. */ setOnClickHandler(func: { (_0: Blockly.FieldImage): any /*missing*/ }): void; } } declare module Blockly.FieldImage { /** * Construct a FieldImage from a JSON arg object, * dereferencing any string table references. * @param {!Object} options A JSON object with options (src, width, height, * alt, and flipRtl). * @return {!Blockly.FieldImage} The new field instance. * @package * @nocollapse */ function fromJson(options: Object): Blockly.FieldImage; } declare module Blockly { class FieldLabel extends FieldLabel__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class FieldLabel__Class extends Blockly.Field__Class { /** * Class for a non-editable, non-serializable text field. * @param {string=} opt_value The initial value of the field. Should cast to a * string. Defaults to an empty string if null or undefined. * @param {string=} opt_class Optional CSS class for the field's text. * @param {Object=} opt_config A map of options used to configure the field. * See the [field creation documentation]{@link https://developers.google.com/blockly/guides/create-custom-blocks/fields/built-in-fields/label#creation} * for a list of properties this parameter supports. * @extends {Blockly.Field} * @constructor */ constructor(opt_value?: string, opt_class?: string, opt_config?: Object); /** * Editable fields usually show some sort of UI indicating they are * editable. This field should not. * @type {boolean} */ EDITABLE: boolean; /** * Create block UI for this label. * @package */ initView(): void; /** * Ensure that the input value casts to a valid string. * @param {*=} opt_newValue The input value. 
* @return {?string} A valid string, or null if invalid. * @protected */ doClassValidation_(opt_newValue?: any): string; /** * Set the css class applied to the field's textElement_. * @param {?string} cssClass The new css class name, or null to remove. */ setClass(cssClass: string): void; } } declare module Blockly.FieldLabel { /** * Construct a FieldLabel from a JSON arg object, * dereferencing any string table references. * @param {!Object} options A JSON object with options (text, and class). * @return {!Blockly.FieldLabel} The new field instance. * @package * @nocollapse */ function fromJson(options: Object): Blockly.FieldLabel; } declare module Blockly { class FieldLabelSerializable extends FieldLabelSerializable__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class FieldLabelSerializable__Class extends Blockly.FieldLabel__Class { /** * Class for a non-editable, serializable text field. * @param {*} opt_value The initial value of the field. Should cast to a * string. Defaults to an empty string if null or undefined. * @param {string=} opt_class Optional CSS class for the field's text. * @param {Object=} opt_config A map of options used to configure the field. * See the [field creation documentation]{@link https://developers.google.com/blockly/guides/create-custom-blocks/fields/built-in-fields/label-serializable#creation} * for a list of properties this parameter supports. * @extends {Blockly.FieldLabel} * @constructor * */ constructor(opt_value: any, opt_class?: string, opt_config?: Object); /** * Editable fields usually show some sort of UI indicating they are * editable. This field should not. * @type {boolean} */ EDITABLE: boolean; /** * Serializable fields are saved by the XML renderer, non-serializable fields * are not. This field should be serialized, but only edited programmatically. * @type {boolean} */ SERIALIZABLE: boolean; } } declare module Blockly.FieldLabelSerializable { /** * Construct a FieldLabelSerializable from a JSON arg object, * dereferencing any string table references. * @param {!Object} options A JSON object with options (text, and class). * @return {!Blockly.FieldLabelSerializable} The new field instance. * @package * @nocollapse */ function fromJson(options: Object): Blockly.FieldLabelSerializable; } declare module Blockly { class FieldMultilineInput extends FieldMultilineInput__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class FieldMultilineInput__Class extends Blockly.FieldTextInput__Class { /** * Class for an editable text area field. * @param {string=} opt_value The initial content of the field. Should cast to a * string. Defaults to an empty string if null or undefined. * @param {Function=} opt_validator An optional function that is called * to validate any constraints on what the user entered. Takes the new * text as an argument and returns either the accepted text, a replacement * text, or null to abort the change. * @param {Object=} opt_config A map of options used to configure the field. * See the [field creation documentation]{@link https://developers.google.com/blockly/guides/create-custom-blocks/fields/built-in-fields/text-input#creation} * for a list of properties this parameter supports. * @extends {Blockly.FieldTextInput} * @constructor */ constructor(opt_value?: string, opt_validator?: Function, opt_config?: Object); /** * Create the block UI for this field. * @package */ initView(): void; /** * Updates the text of the textElement. 
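     *
     * Creation sketch (the text is an example value):
     *
     *     const note = new Blockly.FieldMultilineInput('first line\nsecond line');
     *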
* @protected */ render_(): void; /** * Updates the size of the field based on the text. * @protected */ updateSize_(): void; /** * Resize the editor to fit the text. * @protected */ resizeEditor_(): void; /** * Create the text input editor widget. * @return {!HTMLTextAreaElement} The newly created text input editor. * @protected */ widgetCreate_(): HTMLTextAreaElement; /** * Handle key down to the editor. Override the text input definition of this * so as to not close the editor when enter is typed in. * @param {!Event} e Keyboard event. * @protected */ onHtmlInputKeyDown_(e: Event): void; } } declare module Blockly.FieldMultilineInput { /** * The default height of a single line of text. * @type {number} * @const */ var LINE_HEIGHT: number; /** * Construct a FieldMultilineInput from a JSON arg object, * dereferencing any string table references. * @param {!Object} options A JSON object with options (text, and spellcheck). * @return {!Blockly.FieldMultilineInput} The new field instance. * @package * @nocollapse */ function fromJson(options: Object): Blockly.FieldMultilineInput; } declare module Blockly { class FieldNumber extends FieldNumber__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class FieldNumber__Class extends Blockly.FieldTextInput__Class { /** * Class for an editable number field. * @param {string|number=} opt_value The initial value of the field. Should cast * to a number. Defaults to 0. * @param {?(string|number)=} opt_min Minimum value. * @param {?(string|number)=} opt_max Maximum value. * @param {?(string|number)=} opt_precision Precision for value. * @param {?Function=} opt_validator A function that is called to validate * changes to the field's value. Takes in a number & returns a validated * number, or null to abort the change. * @param {Object=} opt_config A map of options used to configure the field. * See the [field creation documentation]{@link https://developers.google.com/blockly/guides/create-custom-blocks/fields/built-in-fields/number#creation} * for a list of properties this parameter supports. * @extends {Blockly.FieldTextInput} * @constructor */ constructor(opt_value?: string|number, opt_min?: string|number, opt_max?: string|number, opt_precision?: string|number, opt_validator?: Function, opt_config?: Object); /** * The minimum value this number field can contain. * @type {number} * @protected */ min_: number; /** * The maximum value this number field can contain. * @type {number} * @protected */ max_: number; /** * The multiple to which this fields value is rounded. * @type {number} * @protected */ precision_: number; /** * Serializable fields are saved by the XML renderer, non-serializable fields * are not. Editable fields should also be serializable. * @type {boolean} */ SERIALIZABLE: boolean; /** * Set the maximum, minimum and precision constraints on this field. * Any of these properties may be undefined or NaN to be disabled. * Setting precision (usually a power of 10) enforces a minimum step between * values. That is, the user's value will rounded to the closest multiple of * precision. The least significant digit place is inferred from the precision. * Integers values can be enforces by choosing an integer precision. * @param {?(number|string|undefined)} min Minimum value. * @param {?(number|string|undefined)} max Maximum value. * @param {?(number|string|undefined)} precision Precision for value. 
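     *
     * Sketch: a percentage field that only accepts multiples of 5 (example values):
     *
     *     const percent = new Blockly.FieldNumber(50, 0, 100, 5);
     *     percent.setConstraints(0, 100, 10);  // later widen the rounding step
     *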
*/ setConstraints(min: number|string|any /*undefined*/, max: number|string|any /*undefined*/, precision: number|string|any /*undefined*/): void; /** * Sets the minimum value this field can contain. Updates the value to reflect. * @param {?(number|string|undefined)} min Minimum value. */ setMin(min: number|string|any /*undefined*/): void; /** * Returns the current minimum value this field can contain. Default is * -Infinity. * @return {number} The current minimum value this field can contain. */ getMin(): number; /** * Sets the maximum value this field can contain. Updates the value to reflect. * @param {?(number|string|undefined)} max Maximum value. */ setMax(max: number|string|any /*undefined*/): void; /** * Returns the current maximum value this field can contain. Default is * Infinity. * @return {number} The current maximum value this field can contain. */ getMax(): number; /** * Sets the precision of this field's value, i.e. the number to which the * value is rounded. Updates the field to reflect. * @param {?(number|string|undefined)} precision The number to which the * field's value is rounded. */ setPrecision(precision: number|string|any /*undefined*/): void; /** * Returns the current precision of this field. The precision being the * number to which the field's value is rounded. A precision of 0 means that * the value is not rounded. * @return {number} The number to which this field's value is rounded. */ getPrecision(): number; } } declare module Blockly.FieldNumber { /** * Construct a FieldNumber from a JSON arg object. * @param {!Object} options A JSON object with options (value, min, max, and * precision). * @return {!Blockly.FieldNumber} The new field instance. * @package * @nocollapse */ function fromJson(options: Object): Blockly.FieldNumber; } declare module Blockly.fieldRegistry { /** * Registers a field type. * Blockly.fieldRegistry.fromJson uses this registry to * find the appropriate field type. * @param {string} type The field type name as used in the JSON definition. * @param {!{fromJson: Function}} fieldClass The field class containing a * fromJson function that can construct an instance of the field. * @throws {Error} if the type name is empty, the field is already * registered, or the fieldClass is not an object containing a fromJson * function. */ function register(type: string, fieldClass: { fromJson: Function }): void; /** * Unregisters the field registered with the given type. * @param {string} type The field type name as used in the JSON definition. */ function unregister(type: string): void; /** * Construct a Field from a JSON arg object. * Finds the appropriate registered field by the type name as registered using * Blockly.fieldRegistry.register. * @param {!Object} options A JSON object with a type and options specific * to the field type. * @return {Blockly.Field} The new field instance or null if a field wasn't * found with the given type name * @package */ function fromJson(options: Object): Blockly.Field; } declare module Blockly { class FieldTextInput extends FieldTextInput__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class FieldTextInput__Class extends Blockly.Field__Class { /** * Class for an editable text field. * @param {string=} opt_value The initial value of the field. Should cast to a * string. Defaults to an empty string if null or undefined. * @param {?Function=} opt_validator A function that is called to validate * changes to the field's value. 
Takes in a string & returns a validated * string, or null to abort the change. * @param {Object=} opt_config A map of options used to configure the field. * See the [field creation documentation]{@link https://developers.google.com/blockly/guides/create-custom-blocks/fields/built-in-fields/text-input#creation} * for a list of properties this parameter supports. * @extends {Blockly.Field} * @constructor */ constructor(opt_value?: string, opt_validator?: Function, opt_config?: Object); /** * Allow browser to spellcheck this field. * @type {boolean} * @protected */ spellcheck_: boolean; /** * Serializable fields are saved by the XML renderer, non-serializable fields * are not. Editable fields should also be serializable. * @type {boolean} */ SERIALIZABLE: boolean; /** * Mouse cursor style when over the hotspot that initiates the editor. */ CURSOR: any /*missing*/; /** * Ensure that the input value casts to a valid string. * @param {*=} opt_newValue The input value. * @return {*} A valid string, or null if invalid. * @protected */ doClassValidation_(opt_newValue?: any): any; /** * Called by setValue if the text input is not valid. If the field is * currently being edited it reverts value of the field to the previous * value while allowing the display text to be handled by the htmlInput_. * @param {*} _invalidValue The input value that was determined to be invalid. * This is not used by the text input because its display value is stored on * the htmlInput_. * @protected */ doValueInvalid_(_invalidValue: any): void; /** * Called by setValue if the text input is valid. Updates the value of the * field, and updates the text of the field if it is not currently being * edited (i.e. handled by the htmlInput_). * @param {*} newValue The value to be saved. The default validator guarantees * that this is a string. * @protected */ doValueUpdate_(newValue: any): void; /** * Updates the colour of the htmlInput given the current validity of the * field's value. * @protected */ render_(): void; /** * Set whether this field is spellchecked by the browser. * @param {boolean} check True if checked. */ setSpellcheck(check: boolean): void; /** * Show the inline free-text editor on top of the text. * @param {boolean=} opt_quietInput True if editor should be created without * focus. Defaults to false. * @protected */ showEditor_(opt_quietInput?: boolean): void; /** * Create the text input editor widget. * @return {!HTMLElement} The newly created text input editor. * @protected */ widgetCreate_(): HTMLElement; /** * Bind handlers for user input on the text input field's editor. * @param {!HTMLElement} htmlInput The htmlInput to which event * handlers will be bound. * @protected */ bindInputEvents_(htmlInput: HTMLElement): void; /** * Handle key down to the editor. * @param {!Event} e Keyboard event. * @protected */ onHtmlInputKeyDown_(e: Event): void; /** * Set the html input value and the field's internal value. The difference * between this and ``setValue`` is that this also updates the html input * value whilst editing. * @param {*} newValue New value. * @protected */ setEditorValue_(newValue: any): void; /** * Resize the editor to fit the text. * @protected */ resizeEditor_(): void; /** * Transform the provided value into a text to show in the html input.
* `getValueFromEditorText_`. * @param {*} value The value stored in this field. * @returns {string} The text to show on the html input. * @protected */ getEditorText_(value: any): string; /** * Transform the text received from the html input into a value to store * in this field. * Override this method if the field's html input representation is different * than the field's value. This should be coupled with an override of * `getEditorText_`. * @param {string} text Text received from the html input. * @returns {*} The value to store. * @protected */ getValueFromEditorText_(text: string): any; } } declare module Blockly.FieldTextInput { /** * Construct a FieldTextInput from a JSON arg object, * dereferencing any string table references. * @param {!Object} options A JSON object with options (text, and spellcheck). * @return {!Blockly.FieldTextInput} The new field instance. * @package * @nocollapse */ function fromJson(options: Object): Blockly.FieldTextInput; /** * Point size of text. Should match blocklyText's font-size in CSS. */ var FONTSIZE: any /*missing*/; /** * Pixel size of input border radius. * Should match blocklyText's border-radius in CSS. */ var BORDERRADIUS: any /*missing*/; /** * Ensure that only a number may be entered. * @param {string} text The user's text. * @return {?string} A string representing a valid number, or null if invalid. * @deprecated */ function numberValidator(text: string): string; /** * Ensure that only a non-negative integer may be entered. * @param {string} text The user's text. * @return {?string} A string representing a valid int, or null if invalid. * @deprecated */ function nonnegativeIntegerValidator(text: string): string; } declare module Blockly { class FieldVariable extends FieldVariable__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class FieldVariable__Class extends Blockly.FieldDropdown__Class { /** * Class for a variable's dropdown field. * @param {?string} varName The default name for the variable. If null, * a unique variable name will be generated. * @param {Function=} opt_validator A function that is called to validate * changes to the field's value. Takes in a variable ID & returns a * validated variable ID, or null to abort the change. * @param {Array.<string>=} opt_variableTypes A list of the types of variables * to include in the dropdown. * @param {string=} opt_defaultType The type of variable to create if this * field's value is not explicitly set. Defaults to ''. * @param {Object=} opt_config A map of options used to configure the field. * See the [field creation documentation]{@link https://developers.google.com/blockly/guides/create-custom-blocks/fields/built-in-fields/variable#creation} * for a list of properties this parameter supports. * @extends {Blockly.FieldDropdown} * @constructor */ constructor(varName: string, opt_validator?: Function, opt_variableTypes?: string[], opt_defaultType?: string, opt_config?: Object); /** * An array of options for a dropdown list, * or a function which generates these options. * @type {!function(this:Blockly.FieldVariable): !Array.<!Array>} * @protected */ menuGenerator_: { (): any[][] }; /** * The initial variable name passed to this field's constructor, or an * empty string if a name wasn't provided. Used to create the initial * variable. * @type {string} */ defaultVariableName: string; /** * Serializable fields are saved by the XML renderer, non-serializable fields * are not. Editable fields should also be serializable. 
* @type {boolean} */ SERIALIZABLE: boolean; /** * Configure the field based on the given map of options. * @param {!Object} config A map of options to configure the field based on. * @protected */ configure_(config: Object): void; /** * Initialize the model for this field if it has not already been initialized. * If the value has not been set to a variable by the first render, we make up a * variable rather than let the value be invalid. * @package */ initModel(): void; /** * Initialize this field based on the given XML. * @param {!Element} fieldElement The element containing information about the * variable field's state. */ fromXml(fieldElement: Element): void; /** * Serialize this field to XML. * @param {!Element} fieldElement The element to populate with info about the * field's state. * @return {!Element} The element containing info about the field's state. */ toXml(fieldElement: Element): Element; /** * Attach this field to a block. * @param {!Blockly.Block} block The block containing this field. */ setSourceBlock(block: Blockly.Block): void; /** * Get the variable's ID. * @return {string} Current variable's ID. */ getValue(): string; /** * Get the text from this field, which is the selected variable's name. * @return {string} The selected variable's name, or the empty string if no * variable is selected. */ getText(): string; /** * Get the variable model for the selected variable. * Not guaranteed to be in the variable map on the workspace (e.g. if accessed * after the variable has been deleted). * @return {Blockly.VariableModel} The selected variable, or null if none was * selected. * @package */ getVariable(): Blockly.VariableModel; /** * Gets the validation function for this field, or null if not set. * Returns null if the variable is not set, because validators should not * run on the initial setValue call, because the field won't be attached to * a block and workspace at that point. * @return {Function} Validation function, or null. */ getValidator(): Function; /** * Ensure that the id belongs to a valid variable of an allowed type. * @param {*=} opt_newValue The id of the new variable to set. * @return {?string} The validated id, or null if invalid. * @protected */ doClassValidation_(opt_newValue?: any): string; /** * Update the value of this variable field, as well as its variable and text. * * The variable ID should be valid at this point, but if a variable field * validator returns a bad ID, this could break. * @param {*} newId The value to be saved. * @protected */ doValueUpdate_(newId: any): void; /** * Refreshes the name of the variable by grabbing the name of the model. * Used when a variable gets renamed, but the ID stays the same. Should only * be called by the block. * @package */ refreshVariableName(): void; /** * Handle the selection of an item in the variable dropdown menu. * Special case the 'Rename variable...' and 'Delete variable...' options. * In the rename case, prompt the user for a new name. * @param {!Blockly.Menu} menu The Menu component clicked. * @param {!Blockly.MenuItem} menuItem The MenuItem selected within menu. */ onItemSelected(menu: Blockly.Menu, menuItem: Blockly.MenuItem): void; } } declare module Blockly.FieldVariable { /** * Construct a FieldVariable from a JSON arg object, * dereferencing any string table references. * @param {!Object} options A JSON object with options (variable, * variableTypes, and defaultType). * @return {!Blockly.FieldVariable} The new field instance. 
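*
* A sketch of the options object (the key names follow the documented
* options above; the values are invented for illustration):
*     { "variable": "item", "variableTypes": ["String"], "defaultType": "String" }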
* @package * @nocollapse */ function fromJson(options: Object): Blockly.FieldVariable; /** * Return a sorted list of variable names for variable dropdown menus. * Include a special option at the end for creating a new variable name. * @return {!Array.<!Array>} Array of variable names/id tuples. * @this {Blockly.FieldVariable} */ function dropdownCreate(): any[][]; } declare module Blockly { class Flyout extends Flyout__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Flyout__Class { /** * Class for a flyout. * @param {!Object} workspaceOptions Dictionary of options for the workspace. * @constructor */ constructor(workspaceOptions: Object); /** * @type {!Blockly.WorkspaceSvg} * @protected */ workspace_: Blockly.WorkspaceSvg; /** * Is RTL vs LTR. * @type {boolean} */ RTL: boolean; /** * Position of the toolbox and flyout relative to the workspace. * @type {number} * @protected */ toolboxPosition_: number; /** * List of visible buttons. * @type {!Array.<!Blockly.FlyoutButton>} * @protected */ buttons_: Blockly.FlyoutButton[]; /** * Width of output tab. * @type {number} * @const */ tabWidth_: number; /** * Does the flyout automatically close when a block is created? * @type {boolean} */ autoClose: boolean; /** * Corner radius of the flyout background. * @type {number} * @const */ CORNER_RADIUS: number; /** * Margin around the edges of the blocks in the flyout. * @type {number} * @const */ MARGIN: number; /** * Gap between items in horizontal flyouts. Can be overridden with the "sep" * element. * @const {number} */ GAP_X: any /*missing*/; /** * Gap between items in vertical flyouts. Can be overridden with the "sep" * element. * @const {number} */ GAP_Y: any /*missing*/; /** * Top/bottom padding between scrollbar and edge of flyout background. * @type {number} * @const */ SCROLLBAR_PADDING: number; /** * Width of flyout. * @type {number} * @protected */ width_: number; /** * Height of flyout. * @type {number} * @protected */ height_: number; /** * Range of a drag angle from a flyout considered "dragging toward workspace". * Drags that are within the bounds of this many degrees from the orthogonal * line to the flyout edge are considered to be "drags toward the workspace". * Example: * Flyout Edge Workspace * [block] / <-within this angle, drags "toward workspace" | * [block] ---- orthogonal to flyout boundary ---- | * [block] \ | * The angle is given in degrees from the orthogonal. * * This is used to know when to create a new block and when to scroll the * flyout. Setting it to 360 means that all drags create a new block. * @type {number} * @protected */ dragAngleRange_: number; /** * Creates the flyout's DOM. Only needs to be called once. The flyout can * either exist as its own svg element or be a g element nested inside a * separate svg element. * @param {string} tagName The type of tag to put the flyout in. This * should be <svg> or <g>. * @return {!SVGElement} The flyout's SVG group. */ createDom(tagName: string): SVGElement; /** * Initializes the flyout. * @param {!Blockly.Workspace} targetWorkspace The workspace in which to create * new blocks. */ init(targetWorkspace: Blockly.Workspace): void; /** * Dispose of this flyout. * Unlink from all DOM elements to prevent memory leaks. */ dispose(): void; /** * Get the width of the flyout. * @return {number} The width of the flyout. */ getWidth(): number; /** * Get the height of the flyout. * @return {number} The width of the flyout. 
*/ getHeight(): number; /** * Get the workspace inside the flyout. * @return {!Blockly.WorkspaceSvg} The workspace inside the flyout. * @package */ getWorkspace(): Blockly.WorkspaceSvg; /** * Is the flyout visible? * @return {boolean} True if visible. */ isVisible(): boolean; /** * Set whether the flyout is visible. A value of true does not necessarily mean * that the flyout is shown. It could be hidden because its container is hidden. * @param {boolean} visible True if visible. */ setVisible(visible: boolean): void; /** * Set whether this flyout's container is visible. * @param {boolean} visible Whether the container is visible. */ setContainerVisible(visible: boolean): void; /** * Update the view based on coordinates calculated in position(). * @param {number} width The computed width of the flyout's SVG group * @param {number} height The computed height of the flyout's SVG group. * @param {number} x The computed x origin of the flyout's SVG group. * @param {number} y The computed y origin of the flyout's SVG group. * @protected */ positionAt_(width: number, height: number, x: number, y: number): void; /** * Hide and empty the flyout. */ hide(): void; /** * Show and populate the flyout. * @param {!Array|string} xmlList List of blocks to show. * Variables and procedures have a custom set of blocks. */ show(xmlList: any[]|string): void; /** * Add listeners to a block that has been added to the flyout. * @param {!SVGElement} root The root node of the SVG group the block is in. * @param {!Blockly.Block} block The block to add listeners for. * @param {!SVGElement} rect The invisible rectangle under the block that acts * as a mat for that block. * @protected */ addBlockListeners_(root: SVGElement, block: Blockly.Block, rect: SVGElement): void; /** * Does this flyout allow you to create a new instance of the given block? * Used for deciding if a block can be "dragged out of" the flyout. * @param {!Blockly.BlockSvg} block The block to copy from the flyout. * @return {boolean} True if you can create a new instance of the block, false * otherwise. * @package */ isBlockCreatable_(block: Blockly.BlockSvg): boolean; /** * Create a copy of this block on the workspace. * @param {!Blockly.BlockSvg} originalBlock The block to copy from the flyout. * @return {Blockly.BlockSvg} The newly created block, or null if something * went wrong with deserialization. * @package */ createBlock(originalBlock: Blockly.BlockSvg): Blockly.BlockSvg; /** * Initialize the given button: move it to the correct location, * add listeners, etc. * @param {!Blockly.FlyoutButton} button The button to initialize and place. * @param {number} x The x position of the cursor during this layout pass. * @param {number} y The y position of the cursor during this layout pass. * @protected */ initFlyoutButton_(button: Blockly.FlyoutButton, x: number, y: number): void; /** * Create and place a rectangle corresponding to the given block. * @param {!Blockly.Block} block The block to associate the rect to. * @param {number} x The x position of the cursor during this layout pass. * @param {number} y The y position of the cursor during this layout pass. * @param {!{height: number, width: number}} blockHW The height and width of the * block. * @param {number} index The index into the mats list where this rect should be * placed. * @return {!SVGElement} Newly created SVG element for the rectangle behind the * block. 
* @protected */ createRect_(block: Blockly.Block, x: number, y: number, blockHW: { height: number; width: number }, index: number): SVGElement; /** * Move a rectangle to sit exactly behind a block, taking into account tabs, * hats, and any other protrusions we invent. * @param {!SVGElement} rect The rectangle to move directly behind the block. * @param {!Blockly.BlockSvg} block The block the rectangle should be behind. * @protected */ moveRectToBlock_(rect: SVGElement, block: Blockly.BlockSvg): void; /** * Reflow blocks and their mats. */ reflow(): void; /** * @return {boolean} True if this flyout may be scrolled with a scrollbar or by * dragging. * @package */ isScrollable(): boolean; } } declare module Blockly { class FlyoutButton extends FlyoutButton__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class FlyoutButton__Class { /** * Class for a button in the flyout. * @param {!Blockly.WorkspaceSvg} workspace The workspace in which to place this * button. * @param {!Blockly.WorkspaceSvg} targetWorkspace The flyout's target workspace. * @param {!Element} xml The XML specifying the label/button. * @param {boolean} isLabel Whether this button should be styled as a label. * @constructor */ constructor(workspace: Blockly.WorkspaceSvg, targetWorkspace: Blockly.WorkspaceSvg, xml: Element, isLabel: boolean); /** * The width of the button's rect. * @type {number} */ width: number; /** * The height of the button's rect. * @type {number} */ height: number; /** * Create the button elements. * @return {!SVGElement} The button's SVG group. */ createDom(): SVGElement; /** * Correctly position the flyout button and make it visible. */ show(): void; /** * Move the button to the given x, y coordinates. * @param {number} x The new x coordinate. * @param {number} y The new y coordinate. */ moveTo(x: number, y: number): void; /** * Location of the button. * @return {!Blockly.utils.Coordinate} x, y coordinates. * @package */ getPosition(): Blockly.utils.Coordinate; /** * Get the button's target workspace. * @return {!Blockly.WorkspaceSvg} The target workspace of the flyout where this * button resides. */ getTargetWorkspace(): Blockly.WorkspaceSvg; /** * Dispose of this button. */ dispose(): void; } } declare module Blockly.FlyoutButton { /** * The margin around the text in the button. */ var MARGIN: any /*missing*/; } declare module Blockly { class FlyoutDragger extends FlyoutDragger__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class FlyoutDragger__Class extends Blockly.WorkspaceDragger__Class { /** * Class for a flyout dragger. It moves a flyout workspace around when it is * being dragged by a mouse or touch. * Note that the workspace itself manages whether or not it has a drag surface * and how to do translations based on that. This simply passes the right * commands based on events. * @param {!Blockly.Flyout} flyout The flyout to drag. * @extends {Blockly.WorkspaceDragger} * @constructor */ constructor(flyout: Blockly.Flyout); /** * Move the flyout based on the most recent mouse movements. * @param {!Blockly.utils.Coordinate} currentDragDeltaXY How far the pointer has * moved from the position at the start of the drag, in pixel coordinates. 
* @package */ drag(currentDragDeltaXY: Blockly.utils.Coordinate): void; } } declare module Blockly { class HorizontalFlyout extends HorizontalFlyout__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class HorizontalFlyout__Class extends Blockly.Flyout__Class { /** * Class for a flyout. * @param {!Object} workspaceOptions Dictionary of options for the workspace. * @extends {Blockly.Flyout} * @constructor */ constructor(workspaceOptions: Object); /** * Move the flyout to the edge of the workspace. */ position(): void; /** * Scroll the flyout to the top. */ scrollToStart(): void; /** * Determine if a drag delta is toward the workspace, based on the position * and orientation of the flyout. This is used in determineDragIntention_ to * determine if a new block should be created or if the flyout should scroll. * @param {!Blockly.utils.Coordinate} currentDragDeltaXY How far the pointer has * moved from the position at mouse down, in pixel units. * @return {boolean} True if the drag is toward the workspace. * @package */ isDragTowardWorkspace(currentDragDeltaXY: Blockly.utils.Coordinate): boolean; /** * Return the deletion rectangle for this flyout in viewport coordinates. * @return {Blockly.utils.Rect} Rectangle in which to delete. */ getClientRect(): Blockly.utils.Rect; } } declare module Blockly { class VerticalFlyout extends VerticalFlyout__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class VerticalFlyout__Class extends Blockly.Flyout__Class { /** * Class for a flyout. * @param {!Object} workspaceOptions Dictionary of options for the workspace. * @extends {Blockly.Flyout} * @constructor */ constructor(workspaceOptions: Object); /** * Move the flyout to the edge of the workspace. */ position(): void; /** * Scroll the flyout to the top. */ scrollToStart(): void; /** * Determine if a drag delta is toward the workspace, based on the position * and orientation of the flyout. This is used in determineDragIntention_ to * determine if a new block should be created or if the flyout should scroll. * @param {!Blockly.utils.Coordinate} currentDragDeltaXY How far the pointer has * moved from the position at mouse down, in pixel units. * @return {boolean} True if the drag is toward the workspace. * @package */ isDragTowardWorkspace(currentDragDeltaXY: Blockly.utils.Coordinate): boolean; /** * Return the deletion rectangle for this flyout in viewport coordinates. * @return {Blockly.utils.Rect} Rectangle in which to delete. */ getClientRect(): Blockly.utils.Rect; } } declare module Blockly { class Generator extends Generator__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Generator__Class { /** * Class for a code generator that translates the blocks into a language. * @param {string} name Language name of this generator. * @constructor */ constructor(name: string); /** * Arbitrary code to inject into locations that risk causing infinite loops. * Any instances of '%1' will be replaced by the block ID that failed. * E.g. ' checkTimeout(%1);\n' * @type {?string} */ INFINITE_LOOP_TRAP: string; /** * Arbitrary code to inject before every statement. * Any instances of '%1' will be replaced by the block ID of the statement. * E.g. 'highlight(%1);\n' * @type {?string} */ STATEMENT_PREFIX: string; /** * Arbitrary code to inject after every statement. * Any instances of '%1' will be replaced by the block ID of the statement. * E.g. 
'highlight(%1);\n' * @type {?string} */ STATEMENT_SUFFIX: string; /** * The method of indenting. Defaults to two spaces, but language generators * may override this to increase indent or change to tabs. * @type {string} */ INDENT: string; /** * Maximum length for a comment before wrapping. Does not account for * indenting level. * @type {number} */ COMMENT_WRAP: number; /** * List of outer-inner pairings that do NOT require parentheses. * @type {!Array.<!Array.<number>>} */ ORDER_OVERRIDES: number[][]; /** * Generate code for all blocks in the workspace to the specified language. * @param {Blockly.Workspace} workspace Workspace to generate code from. * @return {string} Generated code. */ workspaceToCode(workspace: Blockly.Workspace): string; /** * Prepend a common prefix onto each line of code. * Intended for indenting code or adding comment markers. * @param {string} text The lines of code. * @param {string} prefix The common prefix. * @return {string} The prefixed lines of code. */ prefixLines(text: string, prefix: string): string; /** * Recursively spider a tree of blocks, returning all their comments. * @param {!Blockly.Block} block The block from which to start spidering. * @return {string} Concatenated list of comments. */ allNestedComments(block: Blockly.Block): string; /** * Generate code for the specified block (and attached blocks). * @param {Blockly.Block} block The block to generate code for. * @param {boolean=} opt_thisOnly True to generate code for only this statement. * @return {string|!Array} For statement blocks, the generated code. * For value blocks, an array containing the generated code and an * operator order value. Returns '' if block is null. */ blockToCode(block: Blockly.Block, opt_thisOnly?: boolean): string|any[]; /** * Generate code representing the specified value input. * @param {!Blockly.Block} block The block containing the input. * @param {string} name The name of the input. * @param {number} outerOrder The maximum binding strength (minimum order value) * of any operators adjacent to "block". * @return {string} Generated code or '' if no blocks are connected or the * specified input does not exist. */ valueToCode(block: Blockly.Block, name: string, outerOrder: number): string; /** * Generate a code string representing the blocks attached to the named * statement input. Indent the code. * This is mainly used in generators. When trying to generate code to evaluate * look at using workspaceToCode or blockToCode. * @param {!Blockly.Block} block The block containing the input. * @param {string} name The name of the input. * @return {string} Generated code or '' if no blocks are connected. */ statementToCode(block: Blockly.Block, name: string): string; /** * Add an infinite loop trap to the contents of a loop. * Add statement suffix at the start of the loop block (right after the loop * statement executes), and a statement prefix to the end of the loop block * (right before the loop statement executes). * @param {string} branch Code for loop contents. * @param {!Blockly.Block} block Enclosing block. * @return {string} Loop contents, with infinite loop trap added. */ addLoopTrap(branch: string, block: Blockly.Block): string; /** * Inject a block ID into a message to replace '%1'. * Used for STATEMENT_PREFIX, STATEMENT_SUFFIX, and INFINITE_LOOP_TRAP. * @param {string} msg Code snippet with '%1'. * @param {!Blockly.Block} block Block which has an ID. * @return {string} Code snippet with ID. 
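*
* For instance (values invented for illustration): with STATEMENT_PREFIX set
* to 'highlight(%1);\n', calling injectId(this.STATEMENT_PREFIX, block) for a
* block whose ID is 'abc123' yields something like 'highlight(\'abc123\');\n'.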
*/ injectId(msg: string, block: Blockly.Block): string; /** * Add one or more words to the list of reserved words for this language. * @param {string} words Comma-separated list of words to add to the list. * No spaces. Duplicates are ok. */ addReservedWords(words: string): void; /** * Hook for code to run before code generation starts. * Subclasses may override this, e.g. to initialise the database of variable * names. * @param {!Blockly.Workspace} _workspace Workspace to generate code from. */ init(_workspace: Blockly.Workspace): void; /** * Hook for code to run at end of code generation. * Subclasses may override this, e.g. to prepend the generated code with the * variable definitions. * @param {string} code Generated code. * @return {string} Completed code. */ finish(code: string): string; /** * Naked values are top-level blocks with outputs that aren't plugged into * anything. * Subclasses may override this, e.g. if their language does not allow * naked values. * @param {string} line Line of generated code. * @return {string} Legal line of code. */ scrubNakedValue(line: string): string; } } declare module Blockly.Generator { /** * Category to separate generated function names from variables and procedures. */ var NAME_TYPE: any /*missing*/; } declare module Blockly { class Gesture extends Gesture__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Gesture__Class { /** * Class for one gesture. * @param {!Event} e The event that kicked off this gesture. * @param {!Blockly.WorkspaceSvg} creatorWorkspace The workspace that created * this gesture and has a reference to it. * @constructor */ constructor(e: Event, creatorWorkspace: Blockly.WorkspaceSvg); /** * The position of the mouse when the gesture started. Units are CSS pixels, * with (0, 0) at the top left of the browser window (mouseEvent clientX/Y). * @type {Blockly.utils.Coordinate} */ mouseDownXY_: Blockly.utils.Coordinate; /** * The workspace that the gesture started on. There may be multiple * workspaces on a page; this is more accurate than using * Blockly.getMainWorkspace(). * @type {Blockly.WorkspaceSvg} * @protected */ startWorkspace_: Blockly.WorkspaceSvg; /** * A handle to use to unbind a mouse move listener at the end of a drag. * Opaque data returned from Blockly.bindEventWithChecks_. * @type {Array.<!Array>} * @protected */ onMoveWrapper_: any[][]; /** * A handle to use to unbind a mouse up listener at the end of a drag. * Opaque data returned from Blockly.bindEventWithChecks_. * @type {Array.<!Array>} * @protected */ onUpWrapper_: any[][]; /** * Boolean used internally to break a cycle in disposal. * @type {boolean} * @protected */ isEnding_: boolean; /** * Sever all links from this object. * @package */ dispose(): void; /** * Start a gesture: update the workspace to indicate that a gesture is in * progress and bind mousemove and mouseup handlers. * @param {!Event} e A mouse down or touch start event. * @package */ doStart(e: Event): void; /** * Bind gesture events. * @param {!Event} e A mouse down or touch start event. * @package */ bindMouseEvents(e: Event): void; /** * Handle a mouse move or touch move event. * @param {!Event} e A mouse move or touch move event. * @package */ handleMove(e: Event): void; /** * Handle a mouse up or touch end event. * @param {!Event} e A mouse up or touch end event. * @package */ handleUp(e: Event): void; /** * Cancel an in-progress gesture. If a workspace or block drag is in progress, * end the drag at the most recent location. 
* @package */ cancel(): void; /** * Handle a real or faked right-click event by showing a context menu. * @param {!Event} e A mouse move or touch move event. * @package */ handleRightClick(e: Event): void; /** * Handle a mousedown/touchstart event on a workspace. * @param {!Event} e A mouse down or touch start event. * @param {!Blockly.Workspace} ws The workspace the event hit. * @package */ handleWsStart(e: Event, ws: Blockly.Workspace): void; /** * Handle a mousedown/touchstart event on a flyout. * @param {!Event} e A mouse down or touch start event. * @param {!Blockly.Flyout} flyout The flyout the event hit. * @package */ handleFlyoutStart(e: Event, flyout: Blockly.Flyout): void; /** * Handle a mousedown/touchstart event on a block. * @param {!Event} e A mouse down or touch start event. * @param {!Blockly.BlockSvg} block The block the event hit. * @package */ handleBlockStart(e: Event, block: Blockly.BlockSvg): void; /** * Handle a mousedown/touchstart event on a bubble. * @param {!Event} e A mouse down or touch start event. * @param {!Blockly.Bubble} bubble The bubble the event hit. * @package */ handleBubbleStart(e: Event, bubble: Blockly.Bubble): void; /** * Record the field that a gesture started on. * @param {Blockly.Field} field The field the gesture started on. * @package */ setStartField(field: Blockly.Field): void; /** * Record the bubble that a gesture started on * @param {Blockly.Bubble} bubble The bubble the gesture started on. * @package */ setStartBubble(bubble: Blockly.Bubble): void; /** * Record the block that a gesture started on, and set the target block * appropriately. * @param {Blockly.BlockSvg} block The block the gesture started on. * @package */ setStartBlock(block: Blockly.BlockSvg): void; /** * Whether this gesture is a drag of either a workspace or block. * This function is called externally to block actions that cannot be taken * mid-drag (e.g. using the keyboard to delete the selected blocks). * @return {boolean} True if this gesture is a drag of a workspace or block. * @package */ isDragging(): boolean; /** * Whether this gesture has already been started. In theory every mouse down * has a corresponding mouse up, but in reality it is possible to lose a * mouse up, leaving an in-process gesture hanging. * @return {boolean} Whether this gesture was a click on a workspace. * @package */ hasStarted(): boolean; /** * Get a list of the insertion markers that currently exist. Block drags have * 0, 1, or 2 insertion markers. * @return {!Array.<!Blockly.BlockSvg>} A possibly empty list of insertion * marker blocks. * @package */ getInsertionMarkers(): Blockly.BlockSvg[]; } } declare module Blockly.Gesture { /** * Is a drag or other gesture currently in progress on any workspace? * @return {boolean} True if gesture is occurring. */ function inProgress(): boolean; } declare module Blockly { class Grid extends Grid__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Grid__Class { /** * Class for a workspace's grid. * @param {!SVGElement} pattern The grid's SVG pattern, created during * injection. * @param {!Object} options A dictionary of normalized options for the grid. * See grid documentation: * https://developers.google.com/blockly/guides/configure/web/grid * @constructor */ constructor(pattern: SVGElement, options: Object); /** * Dispose of this grid and unlink from the DOM. * @package */ dispose(): void; /** * Whether blocks should snap to the grid, based on the initial configuration. 
* @return {boolean} True if blocks should snap, false otherwise. * @package */ shouldSnap(): boolean; /** * Get the spacing of the grid points (in px). * @return {number} The spacing of the grid points. * @package */ getSpacing(): number; /** * Get the id of the pattern element, which should be randomized to avoid * conflicts with other Blockly instances on the page. * @return {string} The pattern ID. * @package */ getPatternId(): string; /** * Update the grid with a new scale. * @param {number} scale The new workspace scale. * @package */ update(scale: number): void; /** * Move the grid to a new x and y position, and make sure that change is * visible. * @param {number} x The new x position of the grid (in px). * @param {number} y The new y position ofthe grid (in px). * @package */ moveTo(x: number, y: number): void; } } declare module Blockly.Grid { /** * Create the DOM for the grid described by options. * @param {string} rnd A random ID to append to the pattern's ID. * @param {!Object} gridOptions The object containing grid configuration. * @param {!SVGElement} defs The root SVG element for this workspace's defs. * @return {!SVGElement} The SVG element for the grid pattern. * @package */ function createDom(rnd: string, gridOptions: Object, defs: SVGElement): SVGElement; } declare module Blockly { class Icon extends Icon__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Icon__Class { /** * Class for an icon. * @param {Blockly.BlockSvg} block The block associated with this icon. * @constructor */ constructor(block: Blockly.BlockSvg); /** * The block this icon is attached to. * @type {Blockly.BlockSvg} * @protected */ block_: Blockly.BlockSvg; /** * Does this icon get hidden when the block is collapsed. */ collapseHidden: any /*missing*/; /** * Height and width of icons. */ SIZE: any /*missing*/; /** * Bubble UI (if visible). * @type {Blockly.Bubble} * @protected */ bubble_: Blockly.Bubble; /** * Absolute coordinate of icon's center. * @type {Blockly.utils.Coordinate} * @protected */ iconXY_: Blockly.utils.Coordinate; /** * Create the icon on the block. */ createIcon(): void; /** * Dispose of this icon. */ dispose(): void; /** * Add or remove the UI indicating if this icon may be clicked or not. */ updateEditable(): void; /** * Is the associated bubble visible? * @return {boolean} True if the bubble is visible. */ isVisible(): boolean; /** * Clicking on the icon toggles if the bubble is visible. * @param {!Event} e Mouse click event. * @protected */ iconClick_(e: Event): void; /** * Change the colour of the associated bubble to match its block. */ updateColour(): void; /** * Notification that the icon has moved. Update the arrow accordingly. * @param {!Blockly.utils.Coordinate} xy Absolute location in workspace coordinates. */ setIconLocation(xy: Blockly.utils.Coordinate): void; /** * Notification that the icon has moved, but we don't really know where. * Recompute the icon's location from scratch. */ computeIconLocation(): void; /** * Returns the center of the block's icon relative to the surface. * @return {!Blockly.utils.Coordinate} Object with x and y properties in workspace * coordinates. */ getIconLocation(): Blockly.utils.Coordinate; /** * Get the size of the icon as used for rendering. * This differs from the actual size of the icon, because it bulges slightly * out of its row rather than increasing the height of its row. * @return {!Blockly.utils.Size} Height and width. 
*/ getCorrectedSize(): Blockly.utils.Size; } } declare module Blockly { /** * Inject a Blockly editor into the specified container element (usually a div). * @param {!Element|string} container Containing element, or its ID, * or a CSS selector. * @param {Object=} opt_options Optional dictionary of options. * @return {!Blockly.Workspace} Newly created main workspace. */ function inject(container: Element|string, opt_options?: Object): Blockly.Workspace; } declare module Blockly { class Input extends Input__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Input__Class { /** * Class for an input with an optional field. * @param {number} type The type of the input. * @param {string} name Language-neutral identifier which may used to find this * input again. * @param {!Blockly.Block} block The block containing this input. * @param {Blockly.Connection} connection Optional connection for this input. * @constructor */ constructor(type: number, name: string, block: Blockly.Block, connection: Blockly.Connection); /** @type {number} */ type: number; /** @type {string} */ name: string; /** @type {Blockly.Connection} */ connection: Blockly.Connection; /** @type {!Array.<!Blockly.Field>} */ fieldRow: Blockly.Field[]; /** * Alignment of input's fields (left, right or centre). * @type {number} */ align: number; /** * Get the source block for this input. * @return {Blockly.Block} The source block, or null if there is none. */ getSourceBlock(): Blockly.Block; /** * Add a field (or label from string), and all prefix and suffix fields, to the * end of the input's field row. * @param {string|!Blockly.Field} field Something to add as a field. * @param {string=} opt_name Language-neutral identifier which may used to find * this field again. Should be unique to the host block. * @return {!Blockly.Input} The input being append to (to allow chaining). */ appendField(field: string|Blockly.Field, opt_name?: string): Blockly.Input; /** * Inserts a field (or label from string), and all prefix and suffix fields, at * the location of the input's field row. * @param {number} index The index at which to insert field. * @param {string|!Blockly.Field} field Something to add as a field. * @param {string=} opt_name Language-neutral identifier which may used to find * this field again. Should be unique to the host block. * @return {number} The index following the last inserted field. */ insertFieldAt(index: number, field: string|Blockly.Field, opt_name?: string): number; /** * Remove a field from this input. * @param {string} name The name of the field. * @throws {Error} if the field is not present. */ removeField(name: string): void; /** * Gets whether this input is visible or not. * @return {boolean} True if visible. */ isVisible(): boolean; /** * Sets whether this input is visible or not. * Should only be used to collapse/uncollapse a block. * @param {boolean} visible True if visible. * @return {!Array.<!Blockly.Block>} List of blocks to render. * @package */ setVisible(visible: boolean): Blockly.Block[]; /** * Mark all fields on this input as dirty. * @package */ markDirty(): void; /** * Change a connection's compatibility. * @param {string|Array.<string>|null} check Compatible value type or * list of value types. Null if all types are compatible. * @return {!Blockly.Input} The input being modified (to allow chaining). */ setCheck(check: string|string[]|any /*null*/): Blockly.Input; /** * Change the alignment of the connection's field(s). 
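*
* A chaining sketch (appendValueInput is a Blockly.Block method declared
* elsewhere; the 'VALUE' input name and 'result' label are invented):
*     block.appendValueInput('VALUE')
*         .setCheck('Number')
*         .setAlign(Blockly.ALIGN_RIGHT)
*         .appendField('result');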
* @param {number} align One of Blockly.ALIGN_LEFT, ALIGN_CENTRE, ALIGN_RIGHT. * In RTL mode directions are reversed, and ALIGN_RIGHT aligns to the left. * @return {!Blockly.Input} The input being modified (to allow chaining). */ setAlign(align: number): Blockly.Input; /** * Initialize the fields on this input. */ init(): void; /** * Sever all links to this input. */ dispose(): void; } } declare module Blockly { class InsertionMarkerManager extends InsertionMarkerManager__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class InsertionMarkerManager__Class { /** * Class that controls updates to connections during drags. It is primarily * responsible for finding the closest eligible connection and highlighting or * unhiglighting it as needed during a drag. * @param {!Blockly.BlockSvg} block The top block in the stack being dragged. * @constructor */ constructor(block: Blockly.BlockSvg); /** * Sever all links from this object. * @package */ dispose(): void; /** * Return whether the block would be deleted if dropped immediately, based on * information from the most recent move event. * @return {boolean} True if the block would be deleted if dropped immediately. * @package */ wouldDeleteBlock(): boolean; /** * Return whether the block would be connected if dropped immediately, based on * information from the most recent move event. * @return {boolean} True if the block would be connected if dropped * immediately. * @package */ wouldConnectBlock(): boolean; /** * Connect to the closest connection and render the results. * This should be called at the end of a drag. * @package */ applyConnections(): void; /** * Update highlighted connections based on the most recent move location. * @param {!Blockly.utils.Coordinate} dxy Position relative to drag start, * in workspace units. * @param {?number} deleteArea One of {@link Blockly.DELETE_AREA_TRASH}, * {@link Blockly.DELETE_AREA_TOOLBOX}, or {@link Blockly.DELETE_AREA_NONE}. * @package */ update(dxy: Blockly.utils.Coordinate, deleteArea: number): void; /** * Find the nearest valid connection, which may be the same as the current * closest connection. * @param {!Blockly.utils.Coordinate} dxy Position relative to drag start, * in workspace units. * @return {!Object} An object containing a local connection, a closest * connection, and a radius. */ getCandidate_(dxy: Blockly.utils.Coordinate): Object; /** * Add highlighting showing which block will be replaced. */ highlightBlock_(): void; /** * Get rid of the highlighting marking the block that will be replaced. */ unhighlightBlock_(): void; /** * Get a list of the insertion markers that currently exist. Drags have 0, 1, * or 2 insertion markers. * @return {!Array.<!Blockly.BlockSvg>} A possibly empty list of insertion * marker blocks. * @package */ getInsertionMarkers(): Blockly.BlockSvg[]; } } declare module Blockly { class Mutator extends Mutator__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Mutator__Class extends Blockly.Icon__Class { /** * Class for a mutator dialog. * @param {!Array.<string>} quarkNames List of names of sub-blocks for flyout. * @extends {Blockly.Icon} * @constructor */ constructor(quarkNames: string[]); /** * Add or remove the UI indicating if this icon may be clicked or not. */ updateEditable(): void; /** * Show or hide the mutator bubble. * @param {boolean} visible True if the bubble should be visible. */ setVisible(visible: boolean): void; /** * Dispose of this mutator. 
*/ dispose(): void; /** * Update the styles on all blocks in the mutator. * @public */ updateBlockStyle(): void; } } declare module Blockly.Mutator { /** * Reconnect an block to a mutated input. * @param {Blockly.Connection} connectionChild Connection on child block. * @param {!Blockly.Block} block Parent block. * @param {string} inputName Name of input on parent block. * @return {boolean} True iff a reconnection was made, false otherwise. */ function reconnect(connectionChild: Blockly.Connection, block: Blockly.Block, inputName: string): boolean; /** * Get the parent workspace of a workspace that is inside a mutator, taking into * account whether it is a flyout. * @param {Blockly.Workspace} workspace The workspace that is inside a mutator. * @return {Blockly.Workspace} The mutator's parent workspace or null. * @public */ function findParentWs(workspace: Blockly.Workspace): Blockly.Workspace; } declare module Blockly { class Names extends Names__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Names__Class { /** * Class for a database of entity names (variables, functions, etc). * @param {string} reservedWords A comma-separated string of words that are * illegal for use as names in a language (e.g. 'new,if,this,...'). * @param {string=} opt_variablePrefix Some languages need a '$' or a namespace * before all variable names. * @constructor */ constructor(reservedWords: string, opt_variablePrefix?: string); /** * Empty the database and start from scratch. The reserved words are kept. */ reset(): void; /** * Set the variable map that maps from variable name to variable object. * @param {!Blockly.VariableMap} map The map to track. * @package */ setVariableMap(map: Blockly.VariableMap): void; /** * Convert a Blockly entity name to a legal exportable entity name. * @param {string} name The Blockly entity name (no constraints). * @param {string} type The type of entity in Blockly * ('VARIABLE', 'PROCEDURE', 'BUILTIN', etc...). * @return {string} An entity name that is legal in the exported language. * @suppress {deprecated} Suppress deprecated Blockly.Variables.NAME_TYPE. */ getName(name: string, type: string): string; /** * Convert a Blockly entity name to a legal exportable entity name. * Ensure that this is a new name not overlapping any previously defined name. * Also check against list of reserved words for the current language and * ensure name doesn't collide. * @param {string} name The Blockly entity name (no constraints). * @param {string} type The type of entity in Blockly * ('VARIABLE', 'PROCEDURE', 'BUILTIN', etc...). * @return {string} An entity name that is legal in the exported language. */ getDistinctName(name: string, type: string): string; } } declare module Blockly.Names { /** * Constant to separate developer variable names from user-defined variable * names when running generators. * A developer variable will be declared as a global in the generated code, but * will never be shown to the user in the workspace or stored in the variable * map. */ var DEVELOPER_VARIABLE_TYPE: any /*missing*/; /** * Do the given two entity names refer to the same entity? * Blockly names are case-insensitive. * @param {string} name1 First name. * @param {string} name2 Second name. * @return {boolean} True if names are the same. 
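*
* For example, Blockly.Names.equals('myVar', 'MYVAR') returns true, because
* the comparison is case-insensitive as noted above.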
*/ function equals(name1: string, name2: string): boolean; } declare module Blockly { class Options extends Options__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Options__Class { /** * Parse the user-specified options, using reasonable defaults where behaviour * is unspecified. * @param {!Object} options Dictionary of options. Specification: * https://developers.google.com/blockly/guides/get-started/web#configuration * @constructor */ constructor(options: Object); /** @deprecated January 2019 */ hasScrollbars: any /*missing*/; /** * The parent of the current workspace, or null if there is no parent workspace. * @type {Blockly.Workspace} */ parentWorkspace: Blockly.Workspace; /** * If set, sets the translation of the workspace to match the scrollbars. */ setMetrics: any /*missing*/; /** * Return an object with the metrics required to size the workspace. * @return {Object} Contains size and position metrics, or null. */ getMetrics(): Object; } } declare module Blockly.Options { /** * Parse the provided toolbox tree into a consistent DOM format. * @param {Node|string} tree DOM tree of blocks, or text representation of same. * @return {Node} DOM tree of blocks, or null. */ function parseToolboxTree(tree: Node|string): Node; } declare module Blockly.Procedures { /** * Constant to separate procedure names from variables and generated functions * when running generators. * @deprecated Use Blockly.PROCEDURE_CATEGORY_NAME */ var NAME_TYPE: any /*missing*/; /** * Find all user-created procedure definitions in a workspace. * @param {!Blockly.Workspace} root Root workspace. * @return {!Array.<!Array.<!Array>>} Pair of arrays, the * first contains procedures without return variables, the second with. * Each procedure is defined by a three-element list of name, parameter * list, and return value boolean. */ function allProcedures(root: Blockly.Workspace): any[][][]; /** * Ensure two identically-named procedures don't exist. * Take the proposed procedure name, and return a legal name i.e. one that * is not empty and doesn't collide with other procedures. * @param {string} name Proposed procedure name. * @param {!Blockly.Block} block Block to disambiguate. * @return {string} Non-colliding name. */ function findLegalName(name: string, block: Blockly.Block): string; /** * Return if the given name is already a procedure name. * @param {string} name The questionable name. * @param {!Blockly.Workspace} workspace The workspace to scan for collisions. * @param {Blockly.Block=} opt_exclude Optional block to exclude from * comparisons (one doesn't want to collide with oneself). * @return {boolean} True if the name is used, otherwise return false. */ function isNameUsed(name: string, workspace: Blockly.Workspace, opt_exclude?: Blockly.Block): boolean; /** * Rename a procedure. Called by the editable field. * @param {string} name The proposed new name. * @return {string} The accepted name. * @this {Blockly.Field} */ function rename(name: string): string; /** * Construct the blocks required by the flyout for the procedure category. * @param {!Blockly.Workspace} workspace The workspace containing procedures. * @return {!Array.<!Element>} Array of XML block elements. */ function flyoutCategory(workspace: Blockly.Workspace): Element[]; /** * Find all the callers of a named procedure. * @param {string} name Name of procedure. * @param {!Blockly.Workspace} workspace The workspace to find callers in. * @return {!Array.<!Blockly.Block>} Array of caller blocks. 
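*
* Illustrative call (the procedure name and workspace variable are invented):
*     var callers = Blockly.Procedures.getCallers('do something', workspace);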
*/ function getCallers(name: string, workspace: Blockly.Workspace): Blockly.Block[]; /** * When a procedure definition changes its parameters, find and edit all its * callers. * @param {!Blockly.Block} defBlock Procedure definition block. */ function mutateCallers(defBlock: Blockly.Block): void; /** * Find the definition block for the named procedure. * @param {string} name Name of procedure. * @param {!Blockly.Workspace} workspace The workspace to search. * @return {Blockly.Block} The procedure definition block, or null not found. */ function getDefinition(name: string, workspace: Blockly.Workspace): Blockly.Block; } declare module Blockly { class RenderedConnection extends RenderedConnection__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class RenderedConnection__Class extends Blockly.Connection__Class { /** * Class for a connection between blocks that may be rendered on screen. * @param {!Blockly.BlockSvg} source The block establishing this connection. * @param {number} type The type of the connection. * @extends {Blockly.Connection} * @constructor */ constructor(source: Blockly.BlockSvg, type: number); /** * Returns the distance between this connection and another connection in * workspace units. * @param {!Blockly.Connection} otherConnection The other connection to measure * the distance to. * @return {number} The distance between connections, in workspace units. */ distanceFrom(otherConnection: Blockly.Connection): number; /** * Change the connection's coordinates. * @param {number} x New absolute x coordinate, in workspace coordinates. * @param {number} y New absolute y coordinate, in workspace coordinates. */ moveTo(x: number, y: number): void; /** * Change the connection's coordinates. * @param {number} dx Change to x coordinate, in workspace units. * @param {number} dy Change to y coordinate, in workspace units. */ moveBy(dx: number, dy: number): void; /** * Move this connection to the location given by its offset within the block and * the location of the block's top left corner. * @param {!Blockly.utils.Coordinate} blockTL The location of the top left corner * of the block, in workspace coordinates. */ moveToOffset(blockTL: Blockly.utils.Coordinate): void; /** * Set the offset of this connection relative to the top left of its block. * @param {number} x The new relative x, in workspace units. * @param {number} y The new relative y, in workspace units. */ setOffsetInBlock(x: number, y: number): void; /** * Get the offset of this connection relative to the top left of its block. * @return {!Blockly.utils.Coordinate} The offset of the connection. * @package */ getOffsetInBlock(): Blockly.utils.Coordinate; /** * Find the closest compatible connection to this connection. * All parameters are in workspace units. * @param {number} maxLimit The maximum radius to another connection. * @param {!Blockly.utils.Coordinate} dxy Offset between this connection's location * in the database and the current location (as a result of dragging). * @return {!{connection: ?Blockly.Connection, radius: number}} Contains two * properties: 'connection' which is either another connection or null, * and 'radius' which is the distance. */ closest(maxLimit: number, dxy: Blockly.utils.Coordinate): { connection: Blockly.Connection; radius: number }; /** * Add highlighting around this connection. */ highlight(): void; /** * Unhide this connection, as well as all down-stream connections on any block * attached to this connection. This happens when a block is expanded. 
* Also unhides down-stream comments. * @return {!Array.<!Blockly.Block>} List of blocks to render. */ unhideAll(): Blockly.Block[]; /** * Remove the highlighting around this connection. */ unhighlight(): void; /** * Set whether this connections is hidden (not tracked in a database) or not. * @param {boolean} hidden True if connection is hidden. */ setHidden(hidden: boolean): void; /** * Hide this connection, as well as all down-stream connections on any block * attached to this connection. This happens when a block is collapsed. * Also hides down-stream comments. */ hideAll(): void; /** * Check if the two connections can be dragged to connect to each other. * @param {!Blockly.Connection} candidate A nearby connection to check. * @param {number=} maxRadius The maximum radius allowed for connections, in * workspace units. * @return {boolean} True if the connection is allowed, false otherwise. */ isConnectionAllowed(candidate: Blockly.Connection, maxRadius?: number): boolean; /** * Behavior after a connection attempt fails. * @param {Blockly.Connection} otherConnection Connection that this connection * failed to connect to. * @package */ onFailedConnect(otherConnection: Blockly.Connection): void; } } declare module Blockly { class ScrollbarPair extends ScrollbarPair__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class ScrollbarPair__Class { /** * Class for a pair of scrollbars. Horizontal and vertical. * @param {!Blockly.Workspace} workspace Workspace to bind the scrollbars to. * @constructor */ constructor(workspace: Blockly.Workspace); /** * Dispose of this pair of scrollbars. * Unlink from all DOM elements to prevent memory leaks. */ dispose(): void; /** * Recalculate both of the scrollbars' locations and lengths. * Also reposition the corner rectangle. */ resize(): void; /** * Set the handles of both scrollbars to be at a certain position in CSS pixels * relative to their parents. * @param {number} x Horizontal scroll value. * @param {number} y Vertical scroll value. */ set(x: number, y: number): void; /** * Set whether this scrollbar's container is visible. * @param {boolean} visible Whether the container is visible. */ setContainerVisible(visible: boolean): void; } class Scrollbar extends Scrollbar__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Scrollbar__Class { /** * Class for a pure SVG scrollbar. * This technique offers a scrollbar that is guaranteed to work, but may not * look or behave like the system's scrollbars. * @param {!Blockly.Workspace} workspace Workspace to bind the scrollbar to. * @param {boolean} horizontal True if horizontal, false if vertical. * @param {boolean=} opt_pair True if scrollbar is part of a horiz/vert pair. * @param {string=} opt_class A class to be applied to this scrollbar. * @constructor */ constructor(workspace: Blockly.Workspace, horizontal: boolean, opt_pair?: boolean, opt_class?: string); /** * The position of the mouse along this scrollbar's major axis at the start of * the most recent drag. * Units are CSS pixels, with (0, 0) at the top left of the browser window. * For a horizontal scrollbar this is the x coordinate of the mouse down event; * for a vertical scrollbar it's the y coordinate of the mouse down event. * @type {Blockly.utils.Coordinate} */ startDragMouse_: Blockly.utils.Coordinate; /** * Dispose of this scrollbar. * Unlink from all DOM elements to prevent memory leaks. 
 */
dispose(): void;
/**
 * Set the length of the scrollbar's handle and change the SVG attribute
 * accordingly.
 * @param {number} newLength The new scrollbar handle length in CSS pixels.
 */
setHandleLength_(newLength: number): void;
/**
 * Set the offset of the scrollbar's handle from the scrollbar's position, and
 * change the SVG attribute accordingly.
 * @param {number} newPosition The new scrollbar handle offset in CSS pixels.
 */
setHandlePosition(newPosition: number): void;
/**
 * Recalculate the scrollbar's location and its length.
 * @param {Object=} opt_metrics A data structure describing all the
 *     required dimensions. If not provided, it will be fetched from the host
 *     object.
 */
resize(opt_metrics?: Object): void;
/**
 * Recalculate a horizontal scrollbar's location on the screen and path length.
 * This should be called when the layout or size of the window has changed.
 * @param {!Object} hostMetrics A data structure describing all the
 *     required dimensions, possibly fetched from the host object.
 */
resizeViewHorizontal(hostMetrics: Object): void;
/**
 * Recalculate a horizontal scrollbar's location within its path and length.
 * This should be called when the contents of the workspace have changed.
 * @param {!Object} hostMetrics A data structure describing all the
 *     required dimensions, possibly fetched from the host object.
 */
resizeContentHorizontal(hostMetrics: Object): void;
/**
 * Recalculate a vertical scrollbar's location on the screen and path length.
 * This should be called when the layout or size of the window has changed.
 * @param {!Object} hostMetrics A data structure describing all the
 *     required dimensions, possibly fetched from the host object.
 */
resizeViewVertical(hostMetrics: Object): void;
/**
 * Recalculate a vertical scrollbar's location within its path and length.
 * This should be called when the contents of the workspace have changed.
 * @param {!Object} hostMetrics A data structure describing all the
 *     required dimensions, possibly fetched from the host object.
 */
resizeContentVertical(hostMetrics: Object): void;
/**
 * Is the scrollbar visible. Non-paired scrollbars disappear when they aren't
 * needed.
 * @return {boolean} True if visible.
 */
isVisible(): boolean;
/**
 * Set whether the scrollbar's container is visible and update
 * display accordingly if visibility has changed.
 * @param {boolean} visible Whether the container is visible
 */
setContainerVisible(visible: boolean): void;
/**
 * Set whether the scrollbar is visible.
 * Only applies to non-paired scrollbars.
 * @param {boolean} visible True if visible.
 */
setVisible(visible: boolean): void;
/**
 * Update visibility of scrollbar based on whether it thinks it should
 * be visible and whether its containing workspace is visible.
 * We cannot rely on the containing workspace being hidden to hide us
 * because it is not necessarily our parent in the DOM.
 */
updateDisplay_(): void;
/**
 * Set the scrollbar handle's position.
 * @param {number} value The distance from the top/left end of the bar, in CSS
 *     pixels. It may be larger than the maximum allowable position of the
 *     scrollbar handle.
 */
set(value: number): void;
/**
 * Record the origin of the workspace that the scrollbar is in, in pixels
 * relative to the injection div origin. This is for times when the scrollbar is
 * used in an object whose origin isn't the same as the main workspace
 * (e.g. in a flyout.)
 * @param {number} x The x coordinate of the scrollbar's origin, in CSS pixels.
* @param {number} y The y coordinate of the scrollbar's origin, in CSS pixels. */ setOrigin(x: number, y: number): void; } } declare module Blockly.Scrollbar { /** * Width of vertical scrollbar or height of horizontal scrollbar in CSS pixels. * Scrollbars should be larger on touch devices. */ var scrollbarThickness: any /*missing*/; } declare module Blockly { class Theme extends Theme__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Theme__Class { /** * Class for a theme. * @param {!Object.<string, Blockly.Theme.BlockStyle>} blockStyles A map from * style names (strings) to objects with style attributes for blocks. * @param {!Object.<string, Blockly.Theme.CategoryStyle>} categoryStyles A map * from style names (strings) to objects with style attributes for * categories. * @param {!Object.<string, *>=} opt_componentStyles A map of Blockly component * names to style value. * @constructor */ constructor(blockStyles: { [key: string]: Blockly.Theme.BlockStyle }, categoryStyles: { [key: string]: Blockly.Theme.CategoryStyle }, opt_componentStyles?: { [key: string]: any }); /** * The block styles map. * @type {!Object.<string, Blockly.Theme.BlockStyle>} */ blockStyles_: { [key: string]: Blockly.Theme.BlockStyle }; /** * The category styles map. * @type {!Object.<string, Blockly.Theme.CategoryStyle>} */ categoryStyles_: { [key: string]: Blockly.Theme.CategoryStyle }; /** * The UI components styles map. * @type {!Object.<string, *>} */ componentStyles_: { [key: string]: any }; /** * Overrides or adds all values from blockStyles to blockStyles_ * @param {Object.<string, Blockly.Theme.BlockStyle>} blockStyles Map of * block styles. */ setAllBlockStyles(blockStyles: { [key: string]: Blockly.Theme.BlockStyle }): void; /** * Gets a map of all the block style names. * @return {!Object.<string, Blockly.Theme.BlockStyle>} Map of block styles. */ getAllBlockStyles(): { [key: string]: Blockly.Theme.BlockStyle }; /** * Gets the BlockStyle for the given block style name. * @param {string} blockStyleName The name of the block style. * @return {Blockly.Theme.BlockStyle|undefined} The named block style. */ getBlockStyle(blockStyleName: string): Blockly.Theme.BlockStyle|any /*undefined*/; /** * Overrides or adds a style to the blockStyles map. * @param {string} blockStyleName The name of the block style. * @param {Blockly.Theme.BlockStyle} blockStyle The block style. */ setBlockStyle(blockStyleName: string, blockStyle: Blockly.Theme.BlockStyle): void; /** * Gets the CategoryStyle for the given category style name. * @param {string} categoryStyleName The name of the category style. * @return {Blockly.Theme.CategoryStyle|undefined} The named category style. */ getCategoryStyle(categoryStyleName: string): Blockly.Theme.CategoryStyle|any /*undefined*/; /** * Overrides or adds a style to the categoryStyles map. * @param {string} categoryStyleName The name of the category style. * @param {Blockly.Theme.CategoryStyle} categoryStyle The category style. */ setCategoryStyle(categoryStyleName: string, categoryStyle: Blockly.Theme.CategoryStyle): void; /** * Gets the style for a given Blockly UI component. If the style value is a * string, we attempt to find the value of any named references. * @param {string} componentName The name of the component. * @return {?string} The style value. */ getComponentStyle(componentName: string): string; /** * Configure a specific Blockly UI component with a style value. * @param {string} componentName The name of the component. 
* @param {*} styleValue The style value. */ setComponentStyle(componentName: string, styleValue: any): void; } } declare module Blockly.Theme { /** * A block style. * @typedef {{ * colourPrimary:string, * colourSecondary:string, * colourTertiary:string, * hat:string * }} */ interface BlockStyle { colourPrimary: string; colourSecondary: string; colourTertiary: string; hat: string } /** * A category style. * @typedef {{ * colour:string * }} */ interface CategoryStyle { colour: string } } declare module Blockly { class ThemeManager extends ThemeManager__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class ThemeManager__Class { /** * Class for storing and updating a workspace's theme and UI components. * @param {!Blockly.Theme} theme The workspace theme. * @constructor * @package */ constructor(theme: Blockly.Theme); /** * Get the workspace theme. * @return {!Blockly.Theme} The workspace theme. * @package */ getTheme(): Blockly.Theme; /** * Set the workspace theme, and refresh the workspace and all components. * @param {!Blockly.Theme} theme The workspace theme. * @package */ setTheme(theme: Blockly.Theme): void; /** * Subscribe a workspace to changes to the selected theme. If a new theme is * set, the workspace is called to refresh its blocks. * @param {!Blockly.Workspace} workspace The workspace to subscribe. * @package */ subscribeWorkspace(workspace: Blockly.Workspace): void; /** * Unsubscribe a workspace to changes to the selected theme. * @param {!Blockly.Workspace} workspace The workspace to unsubscribe. * @package */ unsubscribeWorkspace(workspace: Blockly.Workspace): void; /** * Subscribe an element to changes to the selected theme. If a new theme is * selected, the element's style is refreshed with the new theme's style. * @param {!Element} element The element to subscribe. * @param {string} componentName The name used to identify the component. This * must be the same name used to configure the style in the Theme object. * @param {string} propertyName The inline style property name to update. * @package */ subscribe(element: Element, componentName: string, propertyName: string): void; /** * Unsubscribe an element to changes to the selected theme. * @param {Element} element The element to unsubscribe. * @package */ unsubscribe(element: Element): void; /** * Dispose of this theme manager. * @package * @suppress {checkTypes} */ dispose(): void; } } declare module Blockly.ThemeManager { /** * A Blockly UI component type. * @typedef {{ * element:!Element, * propertyName:string * }} */ interface Component { element: Element; propertyName: string } } declare module Blockly { class Toolbox extends Toolbox__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Toolbox__Class { /** * Class for a Toolbox. * Creates the toolbox's DOM. * @param {!Blockly.WorkspaceSvg} workspace The workspace in which to create new * blocks. * @constructor */ constructor(workspace: Blockly.WorkspaceSvg); /** * Is RTL vs LTR. * @type {boolean} */ RTL: boolean; /** * Position of the toolbox and flyout relative to the workspace. * @type {number} */ toolboxPosition: number; /** * Width of the toolbox, which changes only in vertical layout. * @type {number} */ width: number; /** * Height of the toolbox, which changes only in horizontal layout. * @type {number} */ height: number; /** * Initializes the toolbox. */ init(): void; /** * HTML container for the Toolbox menu. 
 * @type {Element}
 */
HtmlDiv: Element;
/**
 * Fill the toolbox with categories and blocks.
 * @param {!Node} languageTree DOM tree of blocks.
 * @package
 */
renderTree(languageTree: Node): void;
/**
 * Handles the given Blockly action on a toolbox.
 * This is only triggered when keyboard accessibility mode is enabled.
 * @param {!Blockly.Action} action The action to be handled.
 * @return {boolean} True if the field handled the action, false otherwise.
 * @package
 */
onBlocklyAction(action: Blockly.Action): boolean;
/**
 * Dispose of this toolbox.
 */
dispose(): void;
/**
 * Get the width of the toolbox.
 * @return {number} The width of the toolbox.
 */
getWidth(): number;
/**
 * Get the height of the toolbox.
 * @return {number} The height of the toolbox.
 */
getHeight(): number;
/**
 * Move the toolbox to the edge.
 */
position(): void;
/**
 * Retrieves and sets the colour for the category using the style name.
 * The category colour is set from the colour style attribute.
 * @param {string} styleName Name of the style.
 * @param {!Blockly.tree.TreeNode} childOut The child to set the hexColour on.
 * @param {string} categoryName Name of the toolbox category.
 */
setColourFromStyle_(styleName: string, childOut: Blockly.tree.TreeNode, categoryName: string): void;
/**
 * Updates the category colours and background colour of selected categories.
 * @package
 */
updateColourFromTheme(): void;
/**
 * Unhighlight any previously specified option.
 */
clearSelection(): void;
/**
 * Adds a style on the toolbox. Usually used to change the cursor.
 * @param {string} style The name of the class to add.
 * @package
 */
addStyle(style: string): void;
/**
 * Removes a style from the toolbox. Usually used to change the cursor.
 * @param {string} style The name of the class to remove.
 * @package
 */
removeStyle(style: string): void;
/**
 * Return the deletion rectangle for this toolbox.
 * @return {Blockly.utils.Rect} Rectangle in which to delete.
 */
getClientRect(): Blockly.utils.Rect;
/**
 * Update the flyout's contents without closing it. Should be used in response
 * to a change in one of the dynamic categories, such as variables or
 * procedures.
 */
refreshSelection(): void;
/**
 * Select the first toolbox category if no category is selected.
 * @package
 */
selectFirstCategory(): void;
}

}

declare module Blockly.Toolbox {

class TreeSeparator extends TreeSeparator__Class { }
/** Fake class which should be extended to avoid inheriting static properties */
class TreeSeparator__Class extends Blockly.tree.TreeNode__Class {
/**
 * A blank separator node in the tree.
 * @param {!Blockly.tree.BaseNode.Config} config The configuration for the tree.
 * @constructor
 * @extends {Blockly.tree.TreeNode}
 */
constructor(config: Blockly.tree.BaseNode.Config);
}

}

declare module Blockly.Tooltip {

/**
 * Is a tooltip currently showing?
 */
var visible: any /*missing*/;
/**
 * Maximum width (in characters) of a tooltip.
 */
var LIMIT: any /*missing*/;
/**
 * Horizontal offset between mouse cursor and tooltip.
 */
var OFFSET_X: any /*missing*/;
/**
 * Vertical offset between mouse cursor and tooltip.
 */
var OFFSET_Y: any /*missing*/;
/**
 * Radius mouse can move before killing tooltip.
 */
var RADIUS_OK: any /*missing*/;
/**
 * Delay before tooltip appears.
 */
var HOVER_MS: any /*missing*/;
/**
 * Horizontal padding between tooltip and screen edge.
 */
var MARGINS: any /*missing*/;
/**
 * The HTML container. Set once by Blockly.Tooltip.createDom.
 * @type {Element}
 */
var DIV: Element;
/**
 * Create the tooltip div and inject it onto the page.
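 *
 * Illustrative sketch only (not part of the generated typings; createDom is
 * normally invoked by Blockly itself during injection). Once the shared
 * tooltip div exists, mouse handlers can be bound to any SVG element whose
 * `tooltip` property has been set; `svgRoot` below is a hypothetical element.
 *
 *     Blockly.Tooltip.createDom();                     // populates Blockly.Tooltip.DIV
 *     svgRoot.tooltip = 'Deletes the selected block.';
 *     Blockly.Tooltip.bindMouseEvents(svgRoot);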
*/ function createDom(): void; /** * Binds the required mouse events onto an SVG element. * @param {!Element} element SVG element onto which tooltip is to be bound. */ function bindMouseEvents(element: Element): void; /** * Hide the tooltip. */ function hide(): void; /** * Hide any in-progress tooltips and block showing new tooltips until the next * call to unblock(). * @package */ function block(): void; /** * Unblock tooltips: allow them to be scheduled and shown according to their own * logic. * @package */ function unblock(): void; } declare module Blockly.Touch { /** * Whether touch is enabled in the browser. * Copied from Closure's goog.events.BrowserFeature.TOUCH_ENABLED */ var TOUCH_ENABLED: any /*missing*/; /** * The TOUCH_MAP lookup dictionary specifies additional touch events to fire, * in conjunction with mouse events. * @type {Object} */ var TOUCH_MAP: Object; /** * Clear the touch identifier that tracks which touch stream to pay attention * to. This ends the current drag/gesture and allows other pointers to be * captured. */ function clearTouchIdentifier(): void; /** * Decide whether Blockly should handle or ignore this event. * Mouse and touch events require special checks because we only want to deal * with one touch stream at a time. All other events should always be handled. * @param {!Event} e The event to check. * @return {boolean} True if this event should be passed through to the * registered handler; false if it should be blocked. */ function shouldHandleEvent(e: Event): boolean; /** * Get the touch identifier from the given event. If it was a mouse event, the * identifier is the string 'mouse'. * @param {!Event} e Mouse event or touch event. * @return {string} The touch identifier from the first changed touch, if * defined. Otherwise 'mouse'. */ function getTouchIdentifierFromEvent(e: Event): string; /** * Check whether the touch identifier on the event matches the current saved * identifier. If there is no identifier, that means it's a mouse event and * we'll use the identifier "mouse". This means we won't deal well with * multiple mice being used at the same time. That seems okay. * If the current identifier was unset, save the identifier from the * event. This starts a drag/gesture, during which touch events with other * identifiers will be silently ignored. * @param {!Event} e Mouse event or touch event. * @return {boolean} Whether the identifier on the event matches the current * saved identifier. */ function checkTouchIdentifier(e: Event): boolean; /** * Set an event's clientX and clientY from its first changed touch. Use this to * make a touch event work in a mouse event handler. * @param {!Event} e A touch event. */ function setClientFromTouch(e: Event): void; /** * Check whether a given event is a mouse or touch event. * @param {!Event} e An event. * @return {boolean} True if it is a mouse or touch event; false otherwise. */ function isMouseOrTouchEvent(e: Event): boolean; /** * Check whether a given event is a touch event or a pointer event. * @param {!Event} e An event. * @return {boolean} True if it is a touch event; false otherwise. */ function isTouchEvent(e: Event): boolean; /** * Split an event into an array of events, one per changed touch or mouse * point. * @param {!Event} e A mouse event or a touch event with one or more changed * touches. * @return {!Array.<!Event>} An array of mouse or touch events. Each touch * event will have exactly one changed touch. 
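 *
 * Rough usage sketch (illustrative only; `handleSingleTouch` is a hypothetical
 * helper): a single touch event carrying several changed touches can be fanned
 * out so that each touch is processed as if it were an independent event.
 *
 *     var events = Blockly.Touch.splitEventByTouches(e);
 *     for (var i = 0; i < events.length; i++) {
 *       handleSingleTouch(events[i]);
 *     }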
*/ function splitEventByTouches(e: Event): Event[]; } declare module Blockly { /** * Nope, that's not a long-press. Either touchend or touchcancel was fired, * or a drag hath begun. Kill the queued long-press task. * @package */ function longStop_(): void; } declare module Blockly { class TouchGesture extends TouchGesture__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class TouchGesture__Class extends Blockly.Gesture__Class { /** * Class for one gesture. * @param {!Event} e The event that kicked off this gesture. * @param {!Blockly.WorkspaceSvg} creatorWorkspace The workspace that created * this gesture and has a reference to it. * @extends {Blockly.Gesture} * @constructor */ constructor(e: Event, creatorWorkspace: Blockly.WorkspaceSvg); /** * Start a gesture: update the workspace to indicate that a gesture is in * progress and bind mousemove and mouseup handlers. * @param {!Event} e A mouse down, touch start or pointer down event. * @package */ doStart(e: Event): void; /** * Bind gesture events. * Overriding the gesture definition of this function, binding the same * functions for onMoveWrapper_ and onUpWrapper_ but passing * opt_noCaptureIdentifier. * In addition, binding a second mouse down event to detect multi-touch events. * @param {!Event} e A mouse down or touch start event. * @package */ bindMouseEvents(e: Event): void; /** * Handle a mouse down, touch start, or pointer down event. * @param {!Event} e A mouse down, touch start, or pointer down event. * @package */ handleStart(e: Event): void; /** * Handle a mouse move, touch move, or pointer move event. * @param {!Event} e A mouse move, touch move, or pointer move event. * @package */ handleMove(e: Event): void; /** * Handle a mouse up, touch end, or pointer up event. * @param {!Event} e A mouse up, touch end, or pointer up event. * @package */ handleUp(e: Event): void; /** * Whether this gesture is part of a multi-touch gesture. * @return {boolean} Whether this gesture is part of a multi-touch gesture. * @package */ isMultiTouch(): boolean; /** * Sever all links from this object. * @package */ dispose(): void; /** * Handle a touch start or pointer down event and keep track of current * pointers. * @param {!Event} e A touch start, or pointer down event. * @package */ handleTouchStart(e: Event): void; /** * Handle a touch move or pointer move event and zoom in/out if two pointers * are on the screen. * @param {!Event} e A touch move, or pointer move event. * @package */ handleTouchMove(e: Event): void; /** * Handle a touch end or pointer end event and end the gesture. * @param {!Event} e A touch end, or pointer end event. * @package */ handleTouchEnd(e: Event): void; /** * Helper function returning the current touch point coordinate. * @param {!Event} e A touch or pointer event. * @return {Blockly.utils.Coordinate} The current touch point coordinate * @package */ getTouchPoint(e: Event): Blockly.utils.Coordinate; } } declare module Blockly.TouchGesture { /** * A multiplier used to convert the gesture scale to a zoom in delta. * @const */ var ZOOM_IN_MULTIPLIER: any /*missing*/; /** * A multiplier used to convert the gesture scale to a zoom out delta. * @const */ var ZOOM_OUT_MULTIPLIER: any /*missing*/; } declare module Blockly { class Trashcan extends Trashcan__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Trashcan__Class { /** * Class for a trash can. * @param {!Blockly.Workspace} workspace The workspace to sit in. 
* @constructor */ constructor(workspace: Blockly.Workspace); /** * Current open/close state of the lid. * @type {boolean} */ isOpen: boolean; /** * Create the trash can elements. * @return {!SVGElement} The trash can's SVG group. */ createDom(): SVGElement; /** * Initialize the trash can. * @param {number} verticalSpacing Vertical distance from workspace edge to the * same edge of the trashcan. * @return {number} Vertical distance from workspace edge to the opposite * edge of the trashcan. */ init(verticalSpacing: number): number; /** * Dispose of this trash can. * Unlink from all DOM elements to prevent memory leaks. */ dispose(): void; /** * Position the trashcan. * It is positioned in the opposite corner to the corner the * categories/toolbox starts at. */ position(): void; /** * Return the deletion rectangle for this trash can. * @return {Blockly.utils.Rect} Rectangle in which to delete. */ getClientRect(): Blockly.utils.Rect; /** * Flip the lid shut. * Called externally after a drag. */ close(): void; /** * Inspect the contents of the trash. */ click(): void; } } declare module Blockly.Events { class Ui extends Ui__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Ui__Class extends Blockly.Events.Abstract__Class { /** * Class for a UI event. * UI events are events that don't need to be sent over the wire for multi-user * editing to work (e.g. scrolling the workspace, zooming, opening toolbox * categories). * UI events do not undo or redo. * @param {Blockly.Block} block The affected block. * @param {string} element One of 'selected', 'comment', 'mutatorOpen', etc. * @param {*} oldValue Previous value of element. * @param {*} newValue New value of element. * @extends {Blockly.Events.Abstract} * @constructor */ constructor(block: Blockly.Block, element: string, oldValue: any, newValue: any); /** * Type of this event. * @type {string} */ type: string; /** * Encode the event as JSON. * @return {!Object} JSON representation. */ toJson(): Object; /** * Decode the JSON event. * @param {!Object} json JSON representation. */ fromJson(json: Object): void; } } declare module Blockly.utils.uiMenu { /** * Get the size of a rendered goog.ui.Menu. * @param {!Blockly.Menu} menu The menu to measure. * @return {!Blockly.utils.Size} Object with width and height properties. * @package */ function getSize(menu: Blockly.Menu): Blockly.utils.Size; /** * Adjust the bounding boxes used to position the widget div to deal with RTL * goog.ui.Menu positioning. In RTL mode the menu renders down and to the left * of its start point, instead of down and to the right. Adjusting all of the * bounding boxes accordingly allows us to use the same code for all widgets. * This function in-place modifies the provided bounding boxes. * @param {!Object} viewportBBox The bounding rectangle of the current viewport, * in window coordinates. * @param {!Object} anchorBBox The bounding rectangle of the anchor, in window * coordinates. * @param {!Blockly.utils.Size} menuSize The size of the menu that is inside the * widget div, in window coordinates. * @package */ function adjustBBoxesForRTL(viewportBBox: Object, anchorBBox: Object, menuSize: Blockly.utils.Size): void; } declare module Blockly.utils { /** * Don't do anything for this event, just halt propagation. * @param {!Event} e An event. */ function noEvent(e: Event): void; /** * Is this event targeting a text input widget? * @param {!Event} e An event. * @return {boolean} True if text input. 
*/ function isTargetInput(e: Event): boolean; /** * Return the coordinates of the top-left corner of this element relative to * its parent. Only for SVG elements and children (e.g. rect, g, path). * @param {!Element} element SVG element to find the coordinates of. * @return {!Blockly.utils.Coordinate} Object with .x and .y properties. */ function getRelativeXY(element: Element): Blockly.utils.Coordinate; /** * Return the coordinates of the top-left corner of this element relative to * the div Blockly was injected into. * @param {!Element} element SVG element to find the coordinates of. If this is * not a child of the div Blockly was injected into, the behaviour is * undefined. * @return {!Blockly.utils.Coordinate} Object with .x and .y properties. */ function getInjectionDivXY_(element: Element): Blockly.utils.Coordinate; /** * Is this event a right-click? * @param {!Event} e Mouse event. * @return {boolean} True if right-click. */ function isRightButton(e: Event): boolean; /** * Return the converted coordinates of the given mouse event. * The origin (0,0) is the top-left corner of the Blockly SVG. * @param {!Event} e Mouse event. * @param {!Element} svg SVG element. * @param {SVGMatrix} matrix Inverted screen CTM to use. * @return {!SVGPoint} Object with .x and .y properties. */ function mouseToSvg(e: Event, svg: Element, matrix: SVGMatrix): SVGPoint; /** * Get the scroll delta of a mouse event in pixel units. * @param {!Event} e Mouse event. * @return {{x: number, y: number}} Scroll delta object with .x and .y * properties. */ function getScrollDeltaPixels(e: Event): { x: number; y: number }; /** * Parse a string with any number of interpolation tokens (%1, %2, ...). * It will also replace string table references (e.g., %{bky_my_msg} and * %{BKY_MY_MSG} will both be replaced with the value in * Blockly.Msg['MY_MSG']). Percentage sign characters '%' may be self-escaped * (e.g., '%%'). * @param {string} message Text which might contain string table references and * interpolation tokens. * @return {!Array.<string|number>} Array of strings and numbers. */ function tokenizeInterpolation(message: string): string|number[]; /** * Replaces string table references in a message, if the message is a string. * For example, "%{bky_my_msg}" and "%{BKY_MY_MSG}" will both be replaced with * the value in Blockly.Msg['MY_MSG']. * @param {string|?} message Message, which may be a string that contains * string table references. * @return {string} String with message references replaced. */ function replaceMessageReferences(message: string|any): string; /** * Validates that any %{MSG_KEY} references in the message refer to keys of * the Blockly.Msg string table. * @param {string} message Text which might contain string table references. * @return {boolean} True if all message references have matching values. * Otherwise, false. */ function checkMessageReferences(message: string): boolean; /** * Generate a unique ID. This should be globally unique. * 87 characters ^ 20 length > 128 bits (better than a UUID). * @return {string} A globally unique ID string. */ function genUid(): string; /** * Check if 3D transforms are supported by adding an element * and attempting to set the property. * @return {boolean} True if 3D transforms are supported. */ function is3dSupported(): boolean; /** * Calls a function after the page has loaded, possibly immediately. * @param {function()} fn Function to run. * @throws Error Will throw if no global document can be found (e.g., Node.js). 
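 *
 * Minimal sketch (illustrative only; the callback body is a placeholder):
 * defer work that needs the DOM until the page has finished loading.
 *
 *     Blockly.utils.runAfterPageLoad(function() {
 *       console.log('Document is ready; safe to measure elements now.');
 *     });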
 */
function runAfterPageLoad(fn: { (): any /*missing*/ }): void;
/**
 * Get the position of the current viewport in window coordinates. This takes
 * scroll into account.
 * @return {!Object} An object containing window width, height, and scroll
 *     position in window coordinates.
 * @package
 */
function getViewportBBox(): Object;
/**
 * Removes the first occurrence of a particular value from an array.
 * @param {!Array} arr Array from which to remove
 *     value.
 * @param {*} obj Object to remove.
 * @return {boolean} True if an element was removed.
 * @package
 */
function arrayRemove(arr: any[], obj: any): boolean;
/**
 * Gets the document scroll distance as a coordinate object.
 * Copied from Closure's goog.dom.getDocumentScroll.
 * @return {!Blockly.utils.Coordinate} Object with values 'x' and 'y'.
 */
function getDocumentScroll(): Blockly.utils.Coordinate;
/**
 * Get a map of all the block's descendants mapping their type to the number of
 * children with that type.
 * @param {!Blockly.Block} block The block to map.
 * @param {boolean=} opt_stripFollowing Optionally ignore all following
 *     statements (blocks that are not inside a value or statement input
 *     of the block).
 * @return {!Object} Map of types to type counts for descendants of the block.
 */
function getBlockTypeCounts(block: Blockly.Block, opt_stripFollowing?: boolean): Object;
/**
 * Converts screen coordinates to workspace coordinates.
 * @param {Blockly.WorkspaceSvg} ws The workspace to find the coordinates on.
 * @param {Blockly.utils.Coordinate} screenCoordinates The screen coordinates to
 *     be converted to workspace coordinates.
 * @return {Blockly.utils.Coordinate} The workspace coordinates.
 * @package
 */
function screenToWsCoordinates(ws: Blockly.WorkspaceSvg, screenCoordinates: Blockly.utils.Coordinate): Blockly.utils.Coordinate;
}

declare module Blockly.Events {

class VarBase extends VarBase__Class { }
/** Fake class which should be extended to avoid inheriting static properties */
class VarBase__Class extends Blockly.Events.Abstract__Class {
/**
 * Abstract class for a variable event.
 * @param {Blockly.VariableModel} variable The variable this event corresponds
 *     to.
 * @extends {Blockly.Events.Abstract}
 * @constructor
 */
constructor(variable: Blockly.VariableModel);
/**
 * The variable id for the variable this event pertains to.
 * @type {string}
 */
varId: string;
/**
 * Encode the event as JSON.
 * @return {!Object} JSON representation.
 */
toJson(): Object;
/**
 * Decode the JSON event.
 * @param {!Object} json JSON representation.
 */
fromJson(json: Object): void;
}

class VarCreate extends VarCreate__Class { }
/** Fake class which should be extended to avoid inheriting static properties */
class VarCreate__Class extends Blockly.Events.VarBase__Class {
/**
 * Class for a variable creation event.
 * @param {Blockly.VariableModel} variable The created variable.
 *     Null for a blank event.
 * @extends {Blockly.Events.VarBase}
 * @constructor
 */
constructor(variable: Blockly.VariableModel);
/**
 * Type of this event.
 * @type {string}
 */
type: string;
/**
 * Encode the event as JSON.
 * @return {!Object} JSON representation.
 */
toJson(): Object;
/**
 * Decode the JSON event.
 * @param {!Object} json JSON representation.
 */
fromJson(json: Object): void;
/**
 * Run a variable creation event.
 * @param {boolean} forward True if run forward, false if run backward (undo).
*/ run(forward: boolean): void; } class VarDelete extends VarDelete__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class VarDelete__Class extends Blockly.Events.VarBase__Class { /** * Class for a variable deletion event. * @param {Blockly.VariableModel} variable The deleted variable. * Null for a blank event. * @extends {Blockly.Events.VarBase} * @constructor */ constructor(variable: Blockly.VariableModel); /** * Type of this event. * @type {string} */ type: string; /** * Encode the event as JSON. * @return {!Object} JSON representation. */ toJson(): Object; /** * Decode the JSON event. * @param {!Object} json JSON representation. */ fromJson(json: Object): void; /** * Run a variable deletion event. * @param {boolean} forward True if run forward, false if run backward (undo). */ run(forward: boolean): void; } class VarRename extends VarRename__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class VarRename__Class extends Blockly.Events.VarBase__Class { /** * Class for a variable rename event. * @param {Blockly.VariableModel} variable The renamed variable. * Null for a blank event. * @param {string} newName The new name the variable will be changed to. * @extends {Blockly.Events.VarBase} * @constructor */ constructor(variable: Blockly.VariableModel, newName: string); /** * Type of this event. * @type {string} */ type: string; /** * Encode the event as JSON. * @return {!Object} JSON representation. */ toJson(): Object; /** * Decode the JSON event. * @param {!Object} json JSON representation. */ fromJson(json: Object): void; /** * Run a variable rename event. * @param {boolean} forward True if run forward, false if run backward (undo). */ run(forward: boolean): void; } } declare module Blockly { class VariableMap extends VariableMap__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class VariableMap__Class { /** * Class for a variable map. This contains a dictionary data structure with * variable types as keys and lists of variables as values. The list of * variables are the type indicated by the key. * @param {!Blockly.Workspace} workspace The workspace this map belongs to. * @constructor */ constructor(workspace: Blockly.Workspace); /** * The workspace this map belongs to. * @type {!Blockly.Workspace} */ workspace: Blockly.Workspace; /** * Clear the variable map. */ clear(): void; /** * Rename the given variable by updating its name in the variable map. * @param {!Blockly.VariableModel} variable Variable to rename. * @param {string} newName New variable name. * @package */ renameVariable(variable: Blockly.VariableModel, newName: string): void; /** * Rename a variable by updating its name in the variable map. Identify the * variable to rename with the given ID. * @param {string} id ID of the variable to rename. * @param {string} newName New variable name. */ renameVariableById(id: string, newName: string): void; /** * Create a variable with a given name, optional type, and optional ID. * @param {string} name The name of the variable. This must be unique across * variables and procedures. * @param {?string=} opt_type The type of the variable like 'int' or 'string'. * Does not need to be unique. Field_variable can filter variables based on * their type. This will default to '' which is a specific type. * @param {?string=} opt_id The unique ID of the variable. This will default to * a UUID. * @return {Blockly.VariableModel} The newly created variable. 
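 *
 * Illustrative sketch (not part of the generated typings; the variable name
 * and type below are arbitrary examples): the map is usually obtained from a
 * workspace, and the type and ID arguments may be omitted.
 *
 *     var map = workspace.getVariableMap();
 *     var counter = map.createVariable('counter', 'Number');
 *     var same = map.getVariable('counter', 'Number');  // returns the same model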
*/ createVariable(name: string, opt_type?: string, opt_id?: string): Blockly.VariableModel; /** * Delete a variable. * @param {!Blockly.VariableModel} variable Variable to delete. */ deleteVariable(variable: Blockly.VariableModel): void; /** * Delete a variables by the passed in ID and all of its uses from this * workspace. May prompt the user for confirmation. * @param {string} id ID of variable to delete. */ deleteVariableById(id: string): void; /** * Find the variable by the given name and type and return it. Return null if * it is not found. * @param {string} name The name to check for. * @param {string=} opt_type The type of the variable. If not provided it * defaults to the empty string, which is a specific type. * @return {Blockly.VariableModel} The variable with the given name, or null if * it was not found. */ getVariable(name: string, opt_type?: string): Blockly.VariableModel; /** * Find the variable by the given ID and return it. Return null if it is not * found. * @param {string} id The ID to check for. * @return {Blockly.VariableModel} The variable with the given ID. */ getVariableById(id: string): Blockly.VariableModel; /** * Get a list containing all of the variables of a specified type. If type is * null, return list of variables with empty string type. * @param {?string} type Type of the variables to find. * @return {!Array.<!Blockly.VariableModel>} The sought after variables of the * passed in type. An empty array if none are found. */ getVariablesOfType(type: string): Blockly.VariableModel[]; /** * Return all variable and potential variable types. This list always contains * the empty string. * @param {?Blockly.Workspace} ws The workspace used to look for potential * variables. This can be different than the workspace stored on this object * if the passed in ws is a flyout workspace. * @return {!Array.<string>} List of variable types. * @package */ getVariableTypes(ws: Blockly.Workspace): string[]; /** * Return all variables of all types. * @return {!Array.<!Blockly.VariableModel>} List of variable models. */ getAllVariables(): Blockly.VariableModel[]; /** * Find all the uses of a named variable. * @param {string} id ID of the variable to find. * @return {!Array.<!Blockly.Block>} Array of block usages. */ getVariableUsesById(id: string): Blockly.Block[]; } } declare module Blockly { class VariableModel extends VariableModel__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class VariableModel__Class { /** * Class for a variable model. * Holds information for the variable including name, ID, and type. * @param {!Blockly.Workspace} workspace The variable's workspace. * @param {string} name The name of the variable. This must be unique across * variables and procedures. * @param {string=} opt_type The type of the variable like 'int' or 'string'. * Does not need to be unique. Field_variable can filter variables based on * their type. This will default to '' which is a specific type. * @param {string=} opt_id The unique ID of the variable. This will default to * a UUID. * @see {Blockly.FieldVariable} * @constructor */ constructor(workspace: Blockly.Workspace, name: string, opt_type?: string, opt_id?: string); /** * The workspace the variable is in. * @type {!Blockly.Workspace} */ workspace: Blockly.Workspace; /** * The name of the variable, typically defined by the user. It must be * unique across all names used for procedures and variables. It may be * changed by the user. 
* @type {string} */ name: string; /** * The type of the variable, such as 'int' or 'sound_effect'. This may be * used to build a list of variables of a specific type. By default this is * the empty string '', which is a specific type. * @see {Blockly.FieldVariable} * @type {string} */ type: string; /** * @return {string} The ID for the variable. */ getId(): string; } } declare module Blockly.VariableModel { /** * A custom compare function for the VariableModel objects. * @param {Blockly.VariableModel} var1 First variable to compare. * @param {Blockly.VariableModel} var2 Second variable to compare. * @return {number} -1 if name of var1 is less than name of var2, 0 if equal, * and 1 if greater. * @package */ function compareByName(var1: Blockly.VariableModel, var2: Blockly.VariableModel): number; } declare module Blockly.Variables { /** * Constant to separate variable names from procedures and generated functions * when running generators. * @deprecated Use Blockly.VARIABLE_CATEGORY_NAME */ var NAME_TYPE: any /*missing*/; /** * Find all user-created variables that are in use in the workspace. * For use by generators. * To get a list of all variables on a workspace, including unused variables, * call Workspace.getAllVariables. * @param {!Blockly.Workspace} ws The workspace to search for variables. * @return {!Array.<!Blockly.VariableModel>} Array of variable models. */ function allUsedVarModels(ws: Blockly.Workspace): Blockly.VariableModel[]; /** * Find all user-created variables that are in use in the workspace and return * only their names. * For use by generators. * To get a list of all variables on a workspace, including unused variables, * call Workspace.getAllVariables. * @deprecated January 2018 */ function allUsedVariables(): void; /** * Find all developer variables used by blocks in the workspace. * Developer variables are never shown to the user, but are declared as global * variables in the generated code. * To declare developer variables, define the getDeveloperVariables function on * your block and return a list of variable names. * For use by generators. * @param {!Blockly.Workspace} workspace The workspace to search. * @return {!Array.<string>} A list of non-duplicated variable names. */ function allDeveloperVariables(workspace: Blockly.Workspace): string[]; /** * Construct the elements (blocks and button) required by the flyout for the * variable category. * @param {!Blockly.Workspace} workspace The workspace containing variables. * @return {!Array.<!Element>} Array of XML elements. */ function flyoutCategory(workspace: Blockly.Workspace): Element[]; /** * Construct the blocks required by the flyout for the variable category. * @param {!Blockly.Workspace} workspace The workspace containing variables. * @return {!Array.<!Element>} Array of XML block elements. */ function flyoutCategoryBlocks(workspace: Blockly.Workspace): Element[]; /** * Return a new variable name that is not yet being used. This will try to * generate single letter variable names in the range 'i' to 'z' to start with. * If no unique name is located it will try 'i' to 'z', 'a' to 'h', * then 'i2' to 'z2' etc. Skip 'l'. * @param {!Blockly.Workspace} workspace The workspace to be unique in. * @return {string} New variable name. */ function generateUniqueName(workspace: Blockly.Workspace): string; /** * Handles "Create Variable" button in the default variables toolbox category. * It will prompt the user for a variable name, including re-prompts if a name * is already in use among the workspace's variables. 
* * Custom button handlers can delegate to this function, allowing variables * types and after-creation processing. More complex customization (e.g., * prompting for variable type) is beyond the scope of this function. * * @param {!Blockly.Workspace} workspace The workspace on which to create the * variable. * @param {function(?string=)=} opt_callback A callback. It will be passed an * acceptable new variable name, or null if change is to be aborted (cancel * button), or undefined if an existing variable was chosen. * @param {string=} opt_type The type of the variable like 'int', 'string', or * ''. This will default to '', which is a specific type. */ function createVariableButtonHandler(workspace: Blockly.Workspace, opt_callback?: { (_0: string): any /*missing*/ }, opt_type?: string): void; /** * Original name of Blockly.Variables.createVariableButtonHandler(..). * @deprecated Use Blockly.Variables.createVariableButtonHandler(..). * * @param {!Blockly.Workspace} workspace The workspace on which to create the * variable. * @param {function(?string=)=} opt_callback A callback. It will be passed an * acceptable new variable name, or null if change is to be aborted (cancel * button), or undefined if an existing variable was chosen. * @param {string=} opt_type The type of the variable like 'int', 'string', or * ''. This will default to '', which is a specific type. */ function createVariable(workspace: Blockly.Workspace, opt_callback?: { (_0: string): any /*missing*/ }, opt_type?: string): void; /** * Rename a variable with the given workspace, variableType, and oldName. * @param {!Blockly.Workspace} workspace The workspace on which to rename the * variable. * @param {Blockly.VariableModel} variable Variable to rename. * @param {function(?string=)=} opt_callback A callback. It will * be passed an acceptable new variable name, or null if change is to be * aborted (cancel button), or undefined if an existing variable was chosen. */ function renameVariable(workspace: Blockly.Workspace, variable: Blockly.VariableModel, opt_callback?: { (_0: string): any /*missing*/ }): void; /** * Prompt the user for a new variable name. * @param {string} promptText The string of the prompt. * @param {string} defaultText The default value to show in the prompt's field. * @param {function(?string)} callback A callback. It will return the new * variable name, or null if the user picked something illegal. */ function promptName(promptText: string, defaultText: string, callback: { (_0: string): any /*missing*/ }): void; /** * Generate DOM objects representing a variable field. * @param {!Blockly.VariableModel} variableModel The variable model to * represent. * @return {Element} The generated DOM. * @public */ function generateVariableFieldDom(variableModel: Blockly.VariableModel): Element; /** * Helper function to look up or create a variable on the given workspace. * If no variable exists, creates and returns it. * @param {!Blockly.Workspace} workspace The workspace to search for the * variable. It may be a flyout workspace or main workspace. * @param {?string} id The ID to use to look up or create the variable, or null. * @param {string=} opt_name The string to use to look up or create the * variable. * @param {string=} opt_type The type to use to look up or create the variable. * @return {!Blockly.VariableModel} The variable corresponding to the given ID * or name + type combination. 
*/ function getOrCreateVariablePackage(workspace: Blockly.Workspace, id: string, opt_name?: string, opt_type?: string): Blockly.VariableModel; /** * Look up a variable on the given workspace. * Always looks in the main workspace before looking in the flyout workspace. * Always prefers lookup by ID to lookup by name + type. * @param {!Blockly.Workspace} workspace The workspace to search for the * variable. It may be a flyout workspace or main workspace. * @param {?string} id The ID to use to look up the variable, or null. * @param {string=} opt_name The string to use to look up the variable. * Only used if lookup by ID fails. * @param {string=} opt_type The type to use to look up the variable. * Only used if lookup by ID fails. * @return {Blockly.VariableModel} The variable corresponding to the given ID * or name + type combination, or null if not found. * @public */ function getVariable(workspace: Blockly.Workspace, id: string, opt_name?: string, opt_type?: string): Blockly.VariableModel; /** * Helper function to get the list of variables that have been added to the * workspace after adding a new block, using the given list of variables that * were in the workspace before the new block was added. * @param {!Blockly.Workspace} workspace The workspace to inspect. * @param {!Array.<!Blockly.VariableModel>} originalVariables The array of * variables that existed in the workspace before adding the new block. * @return {!Array.<!Blockly.VariableModel>} The new array of variables that * were freshly added to the workspace after creating the new block, * or [] if no new variables were added to the workspace. * @package */ function getAddedVariables(workspace: Blockly.Workspace, originalVariables: Blockly.VariableModel[]): Blockly.VariableModel[]; } declare module Blockly.VariablesDynamic { /** * Construct the elements (blocks and button) required by the flyout for the * variable category. * @param {!Blockly.Workspace} workspace The workspace containing variables. * @return {!Array.<!Element>} Array of XML elements. */ function flyoutCategory(workspace: Blockly.Workspace): Element[]; /** * Construct the blocks required by the flyout for the variable category. * @param {!Blockly.Workspace} workspace The workspace containing variables. * @return {!Array.<!Element>} Array of XML block elements. */ function flyoutCategoryBlocks(workspace: Blockly.Workspace): Element[]; } declare module Blockly { class Warning extends Warning__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Warning__Class extends Blockly.Icon__Class { /** * Class for a warning. * @param {!Blockly.Block} block The block associated with this warning. * @extends {Blockly.Icon} * @constructor */ constructor(block: Blockly.Block); /** * Does this icon get hidden when the block is collapsed. */ collapseHidden: any /*missing*/; /** * Show or hide the warning bubble. * @param {boolean} visible True if the bubble should be visible. */ setVisible(visible: boolean): void; /** * Show the bubble. * @package */ createBubble(): void; /** * Dispose of the bubble and references to it. * @package */ disposeBubble(): void; /** * Set this warning's text. * @param {string} text Warning text (or '' to delete). This supports * linebreaks. * @param {string} id An ID for this text entry to be able to maintain * multiple warnings. */ setText(text: string, id: string): void; /** * Get this warning's texts. * @return {string} All texts concatenated into one string. 
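 *
 * Small sketch (illustrative only; IDs and messages are arbitrary examples):
 * entries added with setText under different IDs are concatenated by getText,
 * and passing '' for a given ID removes that entry.
 *
 *     warning.setText('Value input is empty.', 'missingInput');
 *     warning.setText('Block is never reached.', 'unreachable');
 *     var combined = warning.getText();
 *     warning.setText('', 'unreachable');  // drop one entry, keep the other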
*/ getText(): string; /** * Dispose of this warning. */ dispose(): void; } } declare module Blockly.WidgetDiv { /** * The HTML container. Set once by Blockly.WidgetDiv.createDom. * @type {Element} */ var DIV: Element; /** * Create the widget div and inject it onto the page. */ function createDom(): void; /** * Initialize and display the widget div. Close the old one if needed. * @param {!Object} newOwner The object that will be using this container. * @param {boolean} rtl Right-to-left (true) or left-to-right (false). * @param {Function} dispose Optional cleanup function to be run when the * widget is closed. */ function show(newOwner: Object, rtl: boolean, dispose: Function): void; /** * Destroy the widget and hide the div. */ function hide(): void; /** * Is the container visible? * @return {boolean} True if visible. */ function isVisible(): boolean; /** * Destroy the widget and hide the div if it is being used by the specified * object. * @param {!Object} oldOwner The object that was using this container. */ function hideIfOwner(oldOwner: Object): void; /** * Position the widget div based on an anchor rectangle. * The widget should be placed adjacent to but not overlapping the anchor * rectangle. The preferred position is directly below and aligned to the left * (LTR) or right (RTL) side of the anchor. * @param {!Object} viewportBBox The bounding rectangle of the current viewport, * in window coordinates. * @param {!Object} anchorBBox The bounding rectangle of the anchor, in window * coordinates. * @param {!Blockly.utils.Size} widgetSize The size of the widget that is inside the * widget div, in window coordinates. * @param {boolean} rtl Whether the workspace is in RTL mode. This determines * horizontal alignment. * @package */ function positionWithAnchor(viewportBBox: Object, anchorBBox: Object, widgetSize: Blockly.utils.Size, rtl: boolean): void; } declare module Blockly { class Workspace extends Workspace__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Workspace__Class { /** * Class for a workspace. This is a data structure that contains blocks. * There is no UI, and can be created headlessly. * @param {!Blockly.Options=} opt_options Dictionary of options. * @constructor */ constructor(opt_options?: Blockly.Options); /** @type {string} */ id: string; /** @type {!Blockly.Options} */ options: Blockly.Options; /** @type {boolean} */ RTL: boolean; /** @type {boolean} */ horizontalLayout: boolean; /** @type {number} */ toolboxPosition: number; /** * @type {!Array.<!Blockly.Events.Abstract>} * @protected */ undoStack_: Blockly.Events.Abstract[]; /** * @type {!Array.<!Blockly.Events.Abstract>} * @protected */ redoStack_: Blockly.Events.Abstract[]; /** * The cursor used to navigate around the AST for keyboard navigation. * @type {!Blockly.Cursor} * @protected */ cursor_: Blockly.Cursor; /** * The marker used to mark a location for keyboard navigation. * @type {!Blockly.MarkerCursor} * @protected */ marker_: Blockly.MarkerCursor; /** * Object in charge of storing and updating the workspace theme. * @type {!Blockly.ThemeManager} * @protected */ themeManager_: Blockly.ThemeManager; /** * Returns `true` if the workspace is visible and `false` if it's headless. * @type {boolean} */ rendered: boolean; /** * Returns `true` if the workspace is currently in the process of a bulk clear. * @type {boolean} * @package */ isClearing: boolean; /** * Maximum number of undo events in stack. `0` turns off undo, `Infinity` sets * it to unlimited. 
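 *
 * For example (illustrative only), undo can be capped or disabled per
 * workspace instance:
 *
 *     workspace.MAX_UNDO = 0;    // turn undo off entirely
 *     workspace.MAX_UNDO = 128;  // keep at most 128 undoable events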
* @type {number} */ MAX_UNDO: number; /** * Set of databases for rapid lookup of connection locations. * @type {Array.<!Blockly.ConnectionDB>} */ connectionDBList: Blockly.ConnectionDB[]; /** * Sets the cursor for keyboard navigation. * @param {Blockly.Cursor} cursor The cursor used to navigate around the Blockly * AST for keyboard navigation. */ setCursor(cursor: Blockly.Cursor): void; /** * Sets the marker for keyboard navigation. * @param {Blockly.MarkerCursor} marker The marker used to mark a location for * keyboard navigation. */ setMarker(marker: Blockly.MarkerCursor): void; /** * Get the cursor used to navigate around the AST for keyboard navigation. * @return {Blockly.Cursor} The cursor for this workspace. */ getCursor(): Blockly.Cursor; /** * Get the marker used to mark a location for keyboard navigation. * @return {Blockly.MarkerCursor} the marker for this workspace. */ getMarker(): Blockly.MarkerCursor; /** * Get the workspace theme object. * @return {!Blockly.Theme} The workspace theme object. */ getTheme(): Blockly.Theme; /** * Set the workspace theme object. * If no theme is passed, default to the `Blockly.Themes.Classic` theme. * @param {Blockly.Theme} theme The workspace theme object. */ setTheme(theme: Blockly.Theme): void; /** * Refresh all blocks on the workspace after a theme update. * @package */ refreshTheme(): void; /** * Dispose of this workspace. * Unlink from all DOM elements to prevent memory leaks. * @suppress {checkTypes} */ dispose(): void; /** * Add a block to the list of top blocks. * @param {!Blockly.Block} block Block to add. */ addTopBlock(block: Blockly.Block): void; /** * Remove a block from the list of top blocks. * @param {!Blockly.Block} block Block to remove. */ removeTopBlock(block: Blockly.Block): void; /** * Finds the top-level blocks and returns them. Blocks are optionally sorted * by position; top to bottom (with slight LTR or RTL bias). * @param {boolean} ordered Sort the list if true. * @return {!Array.<!Blockly.Block>} The top-level block objects. */ getTopBlocks(ordered: boolean): Blockly.Block[]; /** * Add a block to the list of blocks keyed by type. * @param {!Blockly.Block} block Block to add. */ addTypedBlock(block: Blockly.Block): void; /** * Remove a block from the list of blocks keyed by type. * @param {!Blockly.Block} block Block to remove. */ removeTypedBlock(block: Blockly.Block): void; /** * Finds the blocks with the associated type and returns them. Blocks are * optionally sorted by position; top to bottom (with slight LTR or RTL bias). * @param {string} type The type of block to search for. * @param {boolean} ordered Sort the list if true. * @return {!Array.<!Blockly.Block>} The blocks of the given type. */ getBlocksByType(type: string, ordered: boolean): Blockly.Block[]; /** * Add a comment to the list of top comments. * @param {!Blockly.WorkspaceComment} comment comment to add. * @package */ addTopComment(comment: Blockly.WorkspaceComment): void; /** * Remove a comment from the list of top comments. * @param {!Blockly.WorkspaceComment} comment comment to remove. * @package */ removeTopComment(comment: Blockly.WorkspaceComment): void; /** * Finds the top-level comments and returns them. Comments are optionally * sorted by position; top to bottom (with slight LTR or RTL bias). * @param {boolean} ordered Sort the list if true. * @return {!Array.<!Blockly.WorkspaceComment>} The top-level comment objects. * @package */ getTopComments(ordered: boolean): Blockly.WorkspaceComment[]; /** * Find all blocks in workspace. 
Blocks are optionally sorted * by position; top to bottom (with slight LTR or RTL bias). * @param {boolean} ordered Sort the list if true. * @return {!Array.<!Blockly.Block>} Array of blocks. */ getAllBlocks(ordered: boolean): Blockly.Block[]; /** * Dispose of all blocks and comments in workspace. */ clear(): void; /** * Rename a variable by updating its name in the variable map. Identify the * variable to rename with the given ID. * @param {string} id ID of the variable to rename. * @param {string} newName New variable name. */ renameVariableById(id: string, newName: string): void; /** * Create a variable with a given name, optional type, and optional ID. * @param {string} name The name of the variable. This must be unique across * variables and procedures. * @param {?string=} opt_type The type of the variable like 'int' or 'string'. * Does not need to be unique. Field_variable can filter variables based on * their type. This will default to '' which is a specific type. * @param {?string=} opt_id The unique ID of the variable. This will default to * a UUID. * @return {Blockly.VariableModel} The newly created variable. */ createVariable(name: string, opt_type?: string, opt_id?: string): Blockly.VariableModel; /** * Find all the uses of the given variable, which is identified by ID. * @param {string} id ID of the variable to find. * @return {!Array.<!Blockly.Block>} Array of block usages. */ getVariableUsesById(id: string): Blockly.Block[]; /** * Delete a variables by the passed in ID and all of its uses from this * workspace. May prompt the user for confirmation. * @param {string} id ID of variable to delete. */ deleteVariableById(id: string): void; /** * Check whether a variable exists with the given name. The check is * case-insensitive. * @param {string} _name The name to check for. * @return {number} The index of the name in the variable list, or -1 if it is * not present. * @deprecated April 2017 */ variableIndexOf(_name: string): number; /** * Find the variable by the given name and return it. Return null if it is not * found. * @param {string} name The name to check for. * @param {string=} opt_type The type of the variable. If not provided it * defaults to the empty string, which is a specific type. * @return {Blockly.VariableModel} The variable with the given name. */ getVariable(name: string, opt_type?: string): Blockly.VariableModel; /** * Find the variable by the given ID and return it. Return null if it is not * found. * @param {string} id The ID to check for. * @return {Blockly.VariableModel} The variable with the given ID. */ getVariableById(id: string): Blockly.VariableModel; /** * Find the variable with the specified type. If type is null, return list of * variables with empty string type. * @param {?string} type Type of the variables to find. * @return {Array.<Blockly.VariableModel>} The sought after variables of the * passed in type. An empty array if none are found. */ getVariablesOfType(type: string): Blockly.VariableModel[]; /** * Return all variable types. * @return {!Array.<string>} List of variable types. * @package */ getVariableTypes(): string[]; /** * Return all variables of all types. * @return {!Array.<!Blockly.VariableModel>} List of variable models. */ getAllVariables(): Blockly.VariableModel[]; /** * Returns the horizontal offset of the workspace. * Intended for LTR/RTL compatibility in XML. * Not relevant for a headless workspace. * @return {number} Width. */ getWidth(): number; /** * Obtain a newly created block. 
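 *
 * Headless sketch (illustrative only; 'controls_if' is just an example type
 * that must already be registered in Blockly.Blocks):
 *
 *     var ws = new Blockly.Workspace();
 *     var block = ws.newBlock('controls_if');
 *     console.log(ws.getAllBlocks(false).length);  // 1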
* @param {?string} prototypeName Name of the language object containing * type-specific functions for this block. * @param {string=} opt_id Optional ID. Use this ID if provided, otherwise * create a new ID. * @return {!Blockly.Block} The created block. */ newBlock(prototypeName: string, opt_id?: string): Blockly.Block; /** * The number of blocks that may be added to the workspace before reaching * the maxBlocks. * @return {number} Number of blocks left. */ remainingCapacity(): number; /** * The number of blocks of the given type that may be added to the workspace * before reaching the maxInstances allowed for that type. * @param {string} type Type of block to return capacity for. * @return {number} Number of blocks of type left. */ remainingCapacityOfType(type: string): number; /** * Check if there is remaining capacity for blocks of the given counts to be * created. If the total number of blocks represented by the map is more than * the total remaining capacity, it returns false. If a type count is more * than the remaining capacity for that type, it returns false. * @param {!Object} typeCountsMap A map of types to counts (usually representing * blocks to be created). * @return {boolean} True if there is capacity for the given map, * false otherwise. */ isCapacityAvailable(typeCountsMap: Object): boolean; /** * Checks if the workspace has any limits on the maximum number of blocks, * or the maximum number of blocks of specific types. * @return {boolean} True if it has block limits, false otherwise. */ hasBlockLimits(): boolean; /** * Undo or redo the previous action. * @param {boolean} redo False if undo, true if redo. */ undo(redo: boolean): void; /** * Clear the undo/redo stacks. */ clearUndo(): void; /** * When something in this workspace changes, call a function. * Note that there may be a few recent events already on the stack. Thus the * new change listener might be called with events that occurred a few * milliseconds before the change listener was added. * @param {!Function} func Function to call. * @return {!Function} Obsolete return value, ignore. */ addChangeListener(func: Function): Function; /** * Stop listening for this workspace's changes. * @param {Function} func Function to stop calling. */ removeChangeListener(func: Function): void; /** * Fire a change event. * @param {!Blockly.Events.Abstract} event Event to fire. */ fireChangeListener(event: Blockly.Events.Abstract): void; /** * Find the block on this workspace with the specified ID. * @param {string} id ID of block to find. * @return {Blockly.Block} The sought after block, or null if not found. */ getBlockById(id: string): Blockly.Block; /** * Find the comment on this workspace with the specified ID. * @param {string} id ID of comment to find. * @return {Blockly.WorkspaceComment} The sought after comment, or null if not * found. * @package */ getCommentById(id: string): Blockly.WorkspaceComment; /** * Checks whether all value and statement inputs in the workspace are filled * with blocks. * @param {boolean=} opt_shadowBlocksAreFilled An optional argument controlling * whether shadow blocks are counted as filled. Defaults to true. * @return {boolean} True if all inputs are filled, false otherwise. */ allInputsFilled(opt_shadowBlocksAreFilled?: boolean): boolean; /** * Return the variable map that contains "potential" variables. * These exist in the flyout but not in the workspace. * @return {Blockly.VariableMap} The potential variable map. 
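* A hedged sketch (the nullable return type suggests the map may be null until createPotentialVariableMap() below has been called): * var potentialMap = workspace.getPotentialVariableMap();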
* @package */ getPotentialVariableMap(): Blockly.VariableMap; /** * Create and store the potential variable map for this workspace. * @package */ createPotentialVariableMap(): void; /** * Return the map of all variables on the workspace. * @return {Blockly.VariableMap} The variable map. */ getVariableMap(): Blockly.VariableMap; /** * Get the theme manager for this workspace. * @return {!Blockly.ThemeManager} The theme manager for this workspace. * @package */ getThemeManager(): Blockly.ThemeManager; } } declare module Blockly.Workspace { /** * Angle away from the horizontal to sweep for blocks. Order of execution is * generally top to bottom, but a small angle changes the scan to give a bit of * a left to right bias (reversed in RTL). Units are in degrees. * See: https://tvtropes.org/pmwiki/pmwiki.php/Main/DiagonalBilling */ var SCAN_ANGLE: any /*missing*/; /** * Find the workspace with the specified ID. * @param {string} id ID of workspace to find. * @return {Blockly.Workspace} The sought after workspace or null if not found. */ function getById(id: string): Blockly.Workspace; /** * Find all workspaces. * @return {!Array.<!Blockly.Workspace>} Array of workspaces. */ function getAll(): Blockly.Workspace[]; } declare module Blockly { class WorkspaceAudio extends WorkspaceAudio__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class WorkspaceAudio__Class { /** * Class for loading, storing, and playing audio for a workspace. * @param {Blockly.WorkspaceSvg} parentWorkspace The parent of the workspace * this audio object belongs to, or null. * @constructor */ constructor(parentWorkspace: Blockly.WorkspaceSvg); /** * Dispose of this audio manager. * @package */ dispose(): void; /** * Load an audio file. Cache it, ready for instantaneous playing. * @param {!Array.<string>} filenames List of file types in decreasing order of * preference (i.e. increasing size). E.g. ['media/go.mp3', 'media/go.wav'] * Filenames include path from Blockly's root. File extensions matter. * @param {string} name Name of sound. */ load(filenames: string[], name: string): void; /** * Preload all the audio files so that they play quickly when asked for. * @package */ preload(): void; /** * Play a named sound at specified volume. If volume is not specified, * use full volume (1). * @param {string} name Name of sound. * @param {number=} opt_volume Volume of sound (0-1). */ play(name: string, opt_volume?: number): void; } } declare module Blockly { class WorkspaceComment extends WorkspaceComment__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class WorkspaceComment__Class { /** * Class for a workspace comment. * @param {!Blockly.Workspace} workspace The block's workspace. * @param {string} content The content of this workspace comment. * @param {number} height Height of the comment. * @param {number} width Width of the comment. * @param {string=} opt_id Optional ID. Use this ID if provided, otherwise * create a new ID. * @constructor */ constructor(workspace: Blockly.Workspace, content: string, height: number, width: number, opt_id?: string); /** @type {string} */ id: string; /** * The comment's position in workspace units. (0, 0) is at the workspace's * origin; scale does not change this value. 
* @type {!Blockly.utils.Coordinate} * @protected */ xy_: Blockly.utils.Coordinate; /** * @type {!Blockly.Workspace} */ workspace: Blockly.Workspace; /** * @protected * @type {boolean} */ RTL: boolean; /** * @protected * @type {string} */ content_: string; /** * @package * @type {boolean} */ isComment: boolean; /** * Dispose of this comment. * @package */ dispose(): void; /** * Get comment height. * @return {number} Comment height. * @package */ getHeight(): number; /** * Set comment height. * @param {number} height Comment height. * @package */ setHeight(height: number): void; /** * Get comment width. * @return {number} Comment width. * @package */ getWidth(): number; /** * Set comment width. * @param {number} width comment width. * @package */ setWidth(width: number): void; /** * Get stored location. * @return {!Blockly.utils.Coordinate} The comment's stored location. * This is not valid if the comment is currently being dragged. * @package */ getXY(): Blockly.utils.Coordinate; /** * Move a comment by a relative offset. * @param {number} dx Horizontal offset, in workspace units. * @param {number} dy Vertical offset, in workspace units. * @package */ moveBy(dx: number, dy: number): void; /** * Get whether this comment is deletable or not. * @return {boolean} True if deletable. * @package */ isDeletable(): boolean; /** * Set whether this comment is deletable or not. * @param {boolean} deletable True if deletable. * @package */ setDeletable(deletable: boolean): void; /** * Get whether this comment is movable or not. * @return {boolean} True if movable. * @package */ isMovable(): boolean; /** * Set whether this comment is movable or not. * @param {boolean} movable True if movable. * @package */ setMovable(movable: boolean): void; /** * Returns this comment's text. * @return {string} Comment text. * @package */ getContent(): string; /** * Set this comment's content. * @param {string} content Comment content. * @package */ setContent(content: string): void; /** * Encode a comment subtree as XML with XY coordinates. * @param {boolean=} opt_noId True if the encoder should skip the comment ID. * @return {!Element} Tree of XML elements. * @package */ toXmlWithXY(opt_noId?: boolean): Element; /** * Encode a comment subtree as XML, but don't serialize the XY coordinates. * This method avoids some expensive metrics-related calls that are made in * toXmlWithXY(). * @param {boolean=} opt_noId True if the encoder should skip the comment ID. * @return {!Element} Tree of XML elements. * @package */ toXml(opt_noId?: boolean): Element; } } declare module Blockly.WorkspaceComment { /** * Fire a create event for the given workspace comment, if comments are enabled. * @param {!Blockly.WorkspaceComment} comment The comment that was just created. * @package */ function fireCreateEvent(comment: Blockly.WorkspaceComment): void; /** * Decode an XML comment tag and create a comment on the workspace. * @param {!Element} xmlComment XML comment element. * @param {!Blockly.Workspace} workspace The workspace. * @return {!Blockly.WorkspaceComment} The created workspace comment. * @package */ function fromXml(xmlComment: Element, workspace: Blockly.Workspace): Blockly.WorkspaceComment; /** * Decode an XML comment tag and return the results in an object. * @param {!Element} xml XML comment element. * @return {{w: number, h: number, x: number, y: number, content: string}} An * object containing the id, size, position, and comment string. 
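* A hedged illustration (attribute names assumed from the comment XML written by toXmlWithXY): parsing * <comment id="c1" x="10" y="20" w="120" h="80">hi</comment> * would yield roughly {w: 120, h: 80, x: 10, y: 20, content: 'hi'}.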
* @package */ function parseAttributes(xml: Element): { w: number; h: number; x: number; y: number; content: string }; } declare module Blockly { class WorkspaceCommentSvg extends WorkspaceCommentSvg__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class WorkspaceCommentSvg__Class extends Blockly.WorkspaceComment__Class { /** * Class for a workspace comment's SVG representation. * @param {!Blockly.Workspace} workspace The block's workspace. * @param {string} content The content of this workspace comment. * @param {number} height Height of the comment. * @param {number} width Width of the comment. * @param {string=} opt_id Optional ID. Use this ID if provided, otherwise * create a new ID. * @extends {Blockly.WorkspaceComment} * @constructor */ constructor(workspace: Blockly.Workspace, content: string, height: number, width: number, opt_id?: string); /** * Dispose of this comment. * @package */ dispose(): void; /** * Create and initialize the SVG representation of a workspace comment. * May be called more than once. * @package */ initSvg(): void; /** * Select this comment. Highlight it visually. * @package */ select(): void; /** * Unselect this comment. Remove its highlighting. * @package */ unselect(): void; /** * Select this comment. Highlight it visually. * @package */ addSelect(): void; /** * Unselect this comment. Remove its highlighting. * @package */ removeSelect(): void; /** * Focus this comment. Highlight it visually. * @package */ addFocus(): void; /** * Unfocus this comment. Remove its highlighting. * @package */ removeFocus(): void; /** * Return the coordinates of the top-left corner of this comment relative to * the drawing surface's origin (0,0), in workspace units. * If the comment is on the workspace, (0, 0) is the origin of the workspace * coordinate system. * This does not change with workspace scale. * @return {!Blockly.utils.Coordinate} Object with .x and .y properties in * workspace coordinates. * @package */ getRelativeToSurfaceXY(): Blockly.utils.Coordinate; /** * Move a comment by a relative offset. * @param {number} dx Horizontal offset, in workspace units. * @param {number} dy Vertical offset, in workspace units. * @package */ moveBy(dx: number, dy: number): void; /** * Transforms a comment by setting the translation on the transform attribute * of the block's SVG. * @param {number} x The x coordinate of the translation in workspace units. * @param {number} y The y coordinate of the translation in workspace units. * @package */ translate(x: number, y: number): void; /** * Move this comment during a drag, taking into account whether we are using a * drag surface to translate blocks. * @param {Blockly.BlockDragSurfaceSvg} dragSurface The surface that carries * rendered items during a drag, or null if no drag surface is in use. * @param {!Blockly.utils.Coordinate} newLoc The location to translate to, in * workspace coordinates. * @package */ moveDuringDrag(dragSurface: Blockly.BlockDragSurfaceSvg, newLoc: Blockly.utils.Coordinate): void; /** * Move the bubble group to the specified location in workspace coordinates. * @param {number} x The x position to move to. * @param {number} y The y position to move to. * @package */ moveTo(x: number, y: number): void; /** * Returns the coordinates of a bounding box describing the dimensions of this * comment. * Coordinate system: workspace coordinates. * @return {!Blockly.utils.Rect} Object with coordinates of the bounding box. 
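* A hedged sketch (property names assumed from Blockly.utils.Rect): * var rect = comment.getBoundingRectangle(); // rect.top, rect.bottom, rect.left, rect.right, in workspace units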
* @package */ getBoundingRectangle(): Blockly.utils.Rect; /** * Add or remove the UI indicating if this comment is movable or not. * @package */ updateMovable(): void; /** * Set whether this comment is movable or not. * @param {boolean} movable True if movable. * @package */ setMovable(movable: boolean): void; /** * Recursively adds or removes the dragging class to this node and its children. * @param {boolean} adding True if adding, false if removing. * @package */ setDragging(adding: boolean): void; /** * Return the root node of the SVG or null if none exists. * @return {SVGElement} The root SVG node (probably a group). * @package */ getSvgRoot(): SVGElement; /** * Returns this comment's text. * @return {string} Comment text. * @package */ getContent(): string; /** * Set this comment's content. * @param {string} content Comment content. * @package */ setContent(content: string): void; /** * Update the cursor over this comment by adding or removing a class. * @param {boolean} enable True if the delete cursor should be shown, false * otherwise. * @package */ setDeleteStyle(enable: boolean): void; /** * Encode a comment subtree as XML with XY coordinates. * @param {boolean=} opt_noId True if the encoder should skip the comment ID. * @return {!Element} Tree of XML elements. * @package */ toXmlWithXY(opt_noId?: boolean): Element; } } declare module Blockly.WorkspaceCommentSvg { /** * The width and height to use to size a workspace comment when it is first * added, before it has been edited by the user. * @type {number} * @package */ var DEFAULT_SIZE: number; /** * Decode an XML comment tag and create a rendered comment on the workspace. * @param {!Element} xmlComment XML comment element. * @param {!Blockly.Workspace} workspace The workspace. * @param {number=} opt_wsWidth The width of the workspace, which is used to * position comments correctly in RTL. * @return {!Blockly.WorkspaceCommentSvg} The created workspace comment. * @package */ function fromXml(xmlComment: Element, workspace: Blockly.Workspace, opt_wsWidth?: number): Blockly.WorkspaceCommentSvg; } declare module Blockly { class WorkspaceDragSurfaceSvg extends WorkspaceDragSurfaceSvg__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class WorkspaceDragSurfaceSvg__Class { /** * Blocks are moved into this SVG during a drag, improving performance. * The entire SVG is translated using CSS transforms instead of SVG so the * blocks are never repainted during drag improving performance. * @param {!Element} container Containing element. * @constructor */ constructor(container: Element); /** * Dom structure when the workspace is being dragged. If there is no drag in * progress, the SVG is empty and display: none. * <svg class="blocklyWsDragSurface" style=transform:translate3d(...)> * <g class="blocklyBlockCanvas"></g> * <g class="blocklyBubbleCanvas">/g> * </svg> */ SVG_: any /*missing*/; /** * Create the drag surface and inject it into the container. */ createDom(): void; /** * Translate the entire drag surface during a drag. * We translate the drag surface instead of the blocks inside the surface * so that the browser avoids repainting the SVG. * Because of this, the drag coordinates must be adjusted by scale. * @param {number} x X translation for the entire surface * @param {number} y Y translation for the entire surface * @package */ translateSurface(x: number, y: number): void; /** * Reports the surface translation in scaled workspace coordinates. 
* Use this when finishing a drag to return blocks to the correct position. * @return {!Blockly.utils.Coordinate} Current translation of the surface * @package */ getSurfaceTranslation(): Blockly.utils.Coordinate; /** * Move the blockCanvas and bubbleCanvas out of the surface SVG and on to * newSurface. * @param {SVGElement} newSurface The element to put the drag surface contents * into. * @package */ clearAndHide(newSurface: SVGElement): void; /** * Set the SVG to have the block canvas and bubble canvas in it and then * show the surface. * @param {!SVGElement} blockCanvas The block canvas <g> element from the * workspace. * @param {!SVGElement} bubbleCanvas The <g> element that contains the bubbles. * @param {Element} previousSibling The element to insert the block canvas and bubble canvas after when it goes back in the DOM at the end of a drag. * @param {number} width The width of the workspace SVG element. * @param {number} height The height of the workspace SVG element. * @param {number} scale The scale of the workspace being dragged. * @package */ setContentsAndShow(blockCanvas: SVGElement, bubbleCanvas: SVGElement, previousSibling: Element, width: number, height: number, scale: number): void; } } declare module Blockly { class WorkspaceDragger extends WorkspaceDragger__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class WorkspaceDragger__Class { /** * Class for a workspace dragger. It moves the workspace around when it is * being dragged by a mouse or touch. * Note that the workspace itself manages whether or not it has a drag surface * and how to do translations based on that. This simply passes the right * commands based on events. * @param {!Blockly.WorkspaceSvg} workspace The workspace to drag. * @constructor */ constructor(workspace: Blockly.WorkspaceSvg); /** * Sever all links from this object. * @package */ dispose(): void; /** * Start dragging the workspace. * @package */ startDrag(): void; /** * Finish dragging the workspace and put everything back where it belongs. * @param {!Blockly.utils.Coordinate} currentDragDeltaXY How far the pointer has * moved from the position at the start of the drag, in pixel coordinates. * @package */ endDrag(currentDragDeltaXY: Blockly.utils.Coordinate): void; /** * Move the workspace based on the most recent mouse movements. * @param {!Blockly.utils.Coordinate} currentDragDeltaXY How far the pointer has * moved from the position at the start of the drag, in pixel coordinates. * @package */ drag(currentDragDeltaXY: Blockly.utils.Coordinate): void; } } declare module Blockly.Events { class FinishedLoading extends FinishedLoading__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class FinishedLoading__Class extends Blockly.Events.Abstract__Class { /** * Class for a finished loading event. * Used to notify the developer when the workspace has finished loading (i.e * domToWorkspace). * Finished loading events do not record undo or redo. * @param {!Blockly.Workspace} workspace The workspace that has finished * loading. * @extends {Blockly.Events.Abstract} * @constructor */ constructor(workspace: Blockly.Workspace); /** * The workspace identifier for this event. * @type {string} */ workspaceId: string; /** * The event group id for the group this event belongs to. Groups define * events that should be treated as an single action from the user's * perspective, and should be undone together. * @type {string} */ group: string; /** * Type of this event. 
* @type {string} */ type: string; /** * Encode the event as JSON. * @return {!Object} JSON representation. */ toJson(): Object; /** * Decode the JSON event. * @param {!Object} json JSON representation. */ fromJson(json: Object): void; } } declare module Blockly { class WorkspaceSvg extends WorkspaceSvg__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class WorkspaceSvg__Class extends Blockly.Workspace__Class { /** * Class for a workspace. This is an onscreen area with optional trashcan, * scrollbars, bubbles, and dragging. * @param {!Blockly.Options} options Dictionary of options. * @param {Blockly.BlockDragSurfaceSvg=} opt_blockDragSurface Drag surface for * blocks. * @param {Blockly.WorkspaceDragSurfaceSvg=} opt_wsDragSurface Drag surface for * the workspace. * @extends {Blockly.Workspace} * @constructor */ constructor(options: Blockly.Options, opt_blockDragSurface?: Blockly.BlockDragSurfaceSvg, opt_wsDragSurface?: Blockly.WorkspaceDragSurfaceSvg); /** * A wrapper function called when a resize event occurs. * You can pass the result to `unbindEvent_`. * @type {Array.<!Array>} */ resizeHandlerWrapper_: any[][]; /** * The render status of an SVG workspace. * Returns `false` for headless workspaces and true for instances of * `Blockly.WorkspaceSvg`. * @type {boolean} */ rendered: boolean; /** * Is this workspace the surface for a flyout? * @type {boolean} */ isFlyout: boolean; /** * Is this workspace the surface for a mutator? * @type {boolean} * @package */ isMutator: boolean; /** * Current horizontal scrolling offset in pixel units, relative to the * workspace origin. * * It is useful to think about a view, and a canvas moving beneath that * view. As the canvas moves right, this value becomes more positive, and * the view is now "seeing" the left side of the canvas. As the canvas moves * left, this value becomes more negative, and the view is now "seeing" the * right side of the canvas. * * The confusing thing about this value is that it does not, and must not * include the absoluteLeft offset. This is because it is used to calculate * the viewLeft value. * * The viewLeft is relative to the workspace origin (although in pixel * units). The workspace origin is the top-left corner of the workspace (at * least when it is enabled). It is shifted from the top-left of the blocklyDiv * so as not to be beneath the toolbox. * * When the workspace is enabled the viewLeft and workspace origin are at * the same X location. As the canvas slides towards the right beneath the view * this value (scrollX) becomes more positive, and the viewLeft becomes more * negative relative to the workspace origin (imagine the workspace origin * as a dot on the canvas sliding to the right as the canvas moves). * * So if the scrollX were to include the absoluteLeft this would in a way * "unshift" the workspace origin. This means that the viewLeft would be * representing the left edge of the blocklyDiv, rather than the left edge * of the workspace. * * @type {number} */ scrollX: number; /** * Current vertical scrolling offset in pixel units, relative to the * workspace origin. * * It is useful to think about a view, and a canvas moving beneath that * view. As the canvas moves down, this value becomes more positive, and the * view is now "seeing" the upper part of the canvas. As the canvas moves * up, this value becomes more negative, and the view is "seeing" the lower * part of the canvas. 
* * The confusing thing about this value is that it does not, and must not * include the absoluteTop offset. This is because it is used to calculate * the viewTop value. * * The viewTop is relative to the workspace origin (although in pixel * units). The workspace origin is the top-left corner of the workspace (at * least when it is enabled). It is shifted from the top-left of the * blocklyDiv so as not to be beneath the toolbox. * * When the workspace is enabled the viewTop and workspace origin are at the * same Y location. As the canvas slides towards the bottom this value * (scrollY) becomes more positive, and the viewTop becomes more negative * relative to the workspace origin (imagine the workspace origin as a dot * on the canvas sliding downwards as the canvas moves). * * So if the scrollY were to include the absoluteTop this would in a way * "unshift" the workspace origin. This means that the viewTop would be * representing the top edge of the blocklyDiv, rather than the top edge of * the workspace. * * @type {number} */ scrollY: number; /** * Horizontal scroll value when scrolling started in pixel units. * @type {number} */ startScrollX: number; /** * Vertical scroll value when scrolling started in pixel units. * @type {number} */ startScrollY: number; /** * Current scale. * @type {number} */ scale: number; /** @type {Blockly.Trashcan} */ trashcan: Blockly.Trashcan; /** * This workspace's scrollbars, if they exist. * @type {Blockly.ScrollbarPair} */ scrollbar: Blockly.ScrollbarPair; /** * Developers may define this function to add custom menu options to the * workspace's context menu or edit the workspace-created set of menu options. * @param {!Array.<!Object>} options List of menu options to add to. */ configureContextMenu(options: Object[]): void; /** * In a flyout, the target workspace where blocks should be placed after a drag. * Otherwise null. * @type {Blockly.WorkspaceSvg} * @package */ targetWorkspace: Blockly.WorkspaceSvg; /** * Get the block renderer attached to this workspace. * @return {!Blockly.blockRendering.Renderer} The renderer attached to this workspace. */ getRenderer(): Blockly.blockRendering.Renderer; /** * Add the cursor svg to this workspace's svg group. * @param {SVGElement} cursorSvg The svg root of the cursor to be added to the * workspace svg group. * @package */ setCursorSvg(cursorSvg: SVGElement): void; /** * Add the marker svg to this workspace's svg group. * @param {SVGElement} markerSvg The svg root of the marker to be added to the * workspace svg group. * @package */ setMarkerSvg(markerSvg: SVGElement): void; /** * Getter for the inverted screen CTM. * @return {SVGMatrix} The matrix to use in mouseToSvg */ getInverseScreenCTM(): SVGMatrix; /** * Mark the inverse screen CTM as dirty. */ updateInverseScreenCTM(): void; /** * Getter for isVisible. * @return {boolean} Whether the workspace is visible. * False if the workspace has been hidden by calling `setVisible(false)`. */ isVisible(): boolean; /** * Return the position of the workspace origin relative to the injection div * origin in pixels. * The workspace origin is where a block would render at position (0, 0). * It is not the upper left corner of the workspace SVG. * @return {!Blockly.utils.Coordinate} Offset in pixels. * @package */ getOriginOffsetInPixels(): Blockly.utils.Coordinate; /** * Return the injection div that is a parent of this workspace. * Walks the DOM the first time it's called, then returns a cached value. * @return {!Element} The first parent div with 'injectionDiv' in the name.
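* A hedged sketch: * var injectionDiv = workspace.getInjectionDiv(); // e.g. to read its on-screen size via injectionDiv.getBoundingClientRect()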
* @package */ getInjectionDiv(): Element; /** * Save resize handler data so we can delete it later in dispose. * @param {!Array.<!Array>} handler Data that can be passed to unbindEvent_. */ setResizeHandlerWrapper(handler: any[][]): void; /** * Create the workspace DOM elements. * @param {string=} opt_backgroundClass Either 'blocklyMainBackground' or * 'blocklyMutatorBackground'. * @return {!Element} The workspace's SVG group. */ createDom(opt_backgroundClass?: string): Element; /** * <g class="blocklyWorkspace"> * <rect class="blocklyMainBackground" height="100%" width="100%"></rect> * [Trashcan and/or flyout may go here] * <g class="blocklyBlockCanvas"></g> * <g class="blocklyBubbleCanvas"></g> * </g> * @type {SVGElement} */ svgGroup_: SVGElement; /** @type {SVGElement} */ svgBackground_: SVGElement; /** @type {SVGElement} */ svgBlockCanvas_: SVGElement; /** @type {SVGElement} */ svgBubbleCanvas_: SVGElement; /** * Dispose of this workspace. * Unlink from all DOM elements to prevent memory leaks. */ dispose(): void; /** * Add a trashcan. * @package */ addTrashcan(): void; /** * Add zoom controls. * @package */ addZoomControls(): void; /** @type {Blockly.ZoomControls} */ zoomControls_: Blockly.ZoomControls; /** * Getter for the flyout associated with this workspace. This flyout may be * owned by either the toolbox or the workspace, depending on toolbox * configuration. It will be null if there is no flyout. * @return {Blockly.Flyout} The flyout on this workspace. * @package */ getFlyout(): Blockly.Flyout; /** * Getter for the toolbox associated with this workspace, if one exists. * @return {Blockly.Toolbox} The toolbox on this workspace. * @package */ getToolbox(): Blockly.Toolbox; /** * If enabled, resize the parts of the workspace that change when the workspace * contents (e.g. block positions) change. This will also scroll the * workspace contents if needed. * @package */ resizeContents(): void; /** * Resize and reposition all of the workspace chrome (toolbox, * trash, scrollbars etc.) * This should be called when something changes that * requires recalculating dimensions and positions of the * trash, zoom, toolbox, etc. (e.g. window resize). */ resize(): void; /** * Resizes and repositions workspace chrome if the page has a new * scroll position. * @package */ updateScreenCalculationsIfScrolled(): void; /** * Get the SVG element that forms the drawing surface. * @return {!SVGElement} SVG element. */ getCanvas(): SVGElement; /** * Get the SVG element that forms the bubble surface. * @return {!SVGGElement} SVG element. */ getBubbleCanvas(): SVGGElement; /** * Get the SVG element that contains this workspace. * @return {SVGElement} SVG element. */ getParentSvg(): SVGElement; /** * Translate this workspace to new coordinates. * @param {number} x Horizontal translation, in pixel units relative to the * top left of the Blockly div. * @param {number} y Vertical translation, in pixel units relative to the * top left of the Blockly div. */ translate(x: number, y: number): void; /** * Called at the end of a workspace drag to take the contents * out of the drag surface and put them back into the workspace SVG. * Does nothing if the workspace drag surface is not enabled. * @package */ resetDragSurface(): void; /** * Called at the beginning of a workspace drag to move contents of * the workspace to the drag surface. * Does nothing if the drag surface is not enabled. 
* @package */ setupDragSurface(): void; /** * @return {Blockly.BlockDragSurfaceSvg} This workspace's block drag surface, * if one is in use. * @package */ getBlockDragSurface(): Blockly.BlockDragSurfaceSvg; /** * Returns the horizontal offset of the workspace. * Intended for LTR/RTL compatibility in XML. * @return {number} Width. */ getWidth(): number; /** * Toggles the visibility of the workspace. * Currently only intended for main workspace. * @param {boolean} isVisible True if workspace should be visible. */ setVisible(isVisible: boolean): void; /** * Render all blocks in workspace. */ render(): void; /** * Was used back when block highlighting (for execution) and block selection * (for editing) were the same thing. * Any calls of this function can be deleted. * @deprecated October 2016 */ traceOn(): void; /** * Highlight or unhighlight a block in the workspace. Block highlighting is * often used to visually mark blocks currently being executed. * @param {?string} id ID of block to highlight/unhighlight, * or null for no block (used to unhighlight all blocks). * @param {boolean=} opt_state If undefined, highlight specified block and * automatically unhighlight all others. If true or false, manually * highlight/unhighlight the specified block. */ highlightBlock(id: string, opt_state?: boolean): void; /** * Paste the provided block onto the workspace. * @param {!Element} xmlBlock XML block element. */ paste(xmlBlock: Element): void; /** * Paste the provided block onto the workspace. * @param {!Element} xmlBlock XML block element. */ pasteBlock_(xmlBlock: Element): void; /** * Refresh the toolbox unless there's a drag in progress. * @package */ refreshToolboxSelection(): void; /** * Rename a variable by updating its name in the variable map. Update the * flyout to show the renamed variable immediately. * @param {string} id ID of the variable to rename. * @param {string} newName New variable name. * @package */ renameVariableById(id: string, newName: string): void; /** * Delete a variable by the passed in ID. Update the flyout to show * immediately that the variable is deleted. * @param {string} id ID of variable to delete. * @package */ deleteVariableById(id: string): void; /** * Create a new variable with the given name. Update the flyout to show the * new variable immediately. * @param {string} name The new variable's name. * @param {string=} opt_type The type of the variable like 'int' or 'string'. * Does not need to be unique. Field_variable can filter variables based on * their type. This will default to '' which is a specific type. * @param {string=} opt_id The unique ID of the variable. This will default to * a UUID. * @return {Blockly.VariableModel} The newly created variable. * @package */ createVariable(name: string, opt_type?: string, opt_id?: string): Blockly.VariableModel; /** * Make a list of all the delete areas for this workspace. */ recordDeleteAreas(): void; /** * Is the mouse event over a delete area (toolbox or non-closing flyout)? * @param {!Event} e Mouse move event. * @return {?number} Null if not over a delete area, or an enum representing * which delete area the event is over. */ isDeleteArea(e: Event): number; /** * Start tracking a drag of an object on this workspace. * @param {!Event} e Mouse down event. * @param {!Blockly.utils.Coordinate} xy Starting location of object. */ startDrag(e: Event, xy: Blockly.utils.Coordinate): void; /** * Track a drag of an object on this workspace. * @param {!Event} e Mouse move event. 
* @return {!Blockly.utils.Coordinate} New location of object. */ moveDrag(e: Event): Blockly.utils.Coordinate; /** * Is the user currently dragging a block or scrolling the flyout/workspace? * @return {boolean} True if currently dragging or scrolling. */ isDragging(): boolean; /** * Is this workspace draggable? * @return {boolean} True if this workspace may be dragged. */ isDraggable(): boolean; /** * Should the workspace have bounded content? Used to tell if the * workspace's content should be sized so that it can move (bounded) or not * (exact sizing). * @return {boolean} True if the workspace should be bounded, false otherwise. * @package */ isContentBounded(): boolean; /** * Is this workspace movable? * * This means the user can reposition the X Y coordinates of the workspace * through input. This can be through scrollbars, scroll wheel, dragging, or * through zooming with the scroll wheel (since the zoom is centered on the * mouse position). This does not include zooming with the zoom controls * since the X Y coordinates are decided programmatically. * @return {boolean} True if the workspace is movable, false otherwise. * @package */ isMovable(): boolean; /** * Calculate the bounding box for the blocks on the workspace. * Coordinate system: workspace coordinates. * * @return {!Blockly.utils.Rect} Contains the position and size of the * bounding box containing the blocks on the workspace. */ getBlocksBoundingBox(): Blockly.utils.Rect; /** * Clean up the workspace by ordering all the blocks in a column. */ cleanUp(): void; /** * Modify the block tree on the existing toolbox. * @param {Node|string} tree DOM tree of blocks, or text representation of same. */ updateToolbox(tree: Node|string): void; /** * Mark this workspace as the currently focused main workspace. */ markFocused(): void; /** * Zooms the workspace in or out relative to/centered on the given (x, y) * coordinate. * @param {number} x X coordinate of center, in pixel units relative to the * top-left corner of the parentSVG. * @param {number} y Y coordinate of center, in pixel units relative to the * top-left corner of the parentSVG. * @param {number} amount Amount of zooming. The formula for the new scale * is newScale = currentScale * (scaleSpeed^amount). scaleSpeed is set in * the workspace options. Negative amount values zoom out, and positive * amount values zoom in. */ zoom(x: number, y: number, amount: number): void; /** * Zoom the workspace in or out, centered on the center of the view. * @param {number} type Type of zooming (-1 zooming out and 1 zooming in). */ zoomCenter(type: number): void; /** * Zoom the blocks to fit in the workspace if possible. */ zoomToFit(): void; /** * Add a transition class to the block and bubble canvas, to animate any * transform changes. * @package */ beginCanvasTransition(): void; /** * Remove transition class from the block and bubble canvas. * @package */ endCanvasTransition(): void; /** * Center the workspace. */ scrollCenter(): void; /** * Scroll the workspace to center on the given block. * @param {?string} id ID of the block to center on. * @public */ centerOnBlock(id: string): void; /** * Set the workspace's zoom factor. * @param {number} newScale Zoom factor. Units: (pixels / workspaceUnit). */ setScale(newScale: number): void; /** * Scroll the workspace to a specified offset (in pixels), keeping within the * workspace bounds. See comment on workspaceSvg.scrollX for more detail on * the meaning of these values. * @param {number} x Target X to scroll to.
* @param {number} y Target Y to scroll to. * @package */ scroll(x: number, y: number): void; /** * Update whether this workspace has resizes enabled. * If enabled, workspace will resize when appropriate. * If disabled, workspace will not resize until re-enabled. * Use to avoid resizing during a batch operation, for performance. * @param {boolean} enabled Whether resizes should be enabled. */ setResizesEnabled(enabled: boolean): void; /** * Dispose of all blocks in workspace, with an optimization to prevent resizes. */ clear(): void; /** * Register a callback function associated with a given key, for clicks on * buttons and labels in the flyout. * For instance, a button specified by the XML * <button text="create variable" callbackKey="CREATE_VARIABLE"></button> * should be matched by a call to * registerButtonCallback("CREATE_VARIABLE", yourCallbackFunction). * @param {string} key The name to use to look up this function. * @param {function(!Blockly.FlyoutButton)} func The function to call when the * given button is clicked. */ registerButtonCallback(key: string, func: { (_0: Blockly.FlyoutButton): any /*missing*/ }): void; /** * Get the callback function associated with a given key, for clicks on buttons * and labels in the flyout. * @param {string} key The name to use to look up the function. * @return {?function(!Blockly.FlyoutButton)} The function corresponding to the * given key for this workspace; null if no callback is registered. */ getButtonCallback(key: string): { (_0: Blockly.FlyoutButton): any /*missing*/ }; /** * Remove a callback for a click on a button in the flyout. * @param {string} key The name associated with the callback function. */ removeButtonCallback(key: string): void; /** * Register a callback function associated with a given key, for populating * custom toolbox categories in this workspace. See the variable and procedure * categories as an example. * @param {string} key The name to use to look up this function. * @param {function(!Blockly.Workspace):!Array.<!Element>} func The function to * call when the given toolbox category is opened. */ registerToolboxCategoryCallback(key: string, func: { (_0: Blockly.Workspace): Element[] }): void; /** * Get the callback function associated with a given key, for populating * custom toolbox categories in this workspace. * @param {string} key The name to use to look up the function. * @return {?function(!Blockly.Workspace):!Array.<!Element>} The function * corresponding to the given key for this workspace, or null if no function * is registered. */ getToolboxCategoryCallback(key: string): { (_0: Blockly.Workspace): Element[] }; /** * Remove a callback for a click on a custom category's name in the toolbox. * @param {string} key The name associated with the callback function. */ removeToolboxCategoryCallback(key: string): void; /** * Look up the gesture that is tracking this touch stream on this workspace. * May create a new gesture. * @param {!Event} e Mouse event or touch event. * @return {Blockly.TouchGesture} The gesture that is tracking this touch * stream, or null if no valid gesture exists. * @package */ getGesture(e: Event): Blockly.TouchGesture; /** * Clear the reference to the current gesture. * @package */ clearGesture(): void; /** * Cancel the current gesture, if one exists. * @package */ cancelCurrentGesture(): void; /** * Get the audio manager for this workspace. * @return {!Blockly.WorkspaceAudio} The audio manager for this workspace. 
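* A hedged sketch (assumes a sound registered under the name 'click', as in the default Blockly media): * workspace.getAudioManager().play('click', 0.5);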
*/ getAudioManager(): Blockly.WorkspaceAudio; /** * Get the grid object for this workspace, or null if there is none. * @return {Blockly.Grid} The grid object for this workspace. * @package */ getGrid(): Blockly.Grid; } } declare module Blockly.Events { class CommentBase extends CommentBase__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class CommentBase__Class extends Blockly.Events.Abstract__Class { /** * Abstract class for a comment event. * @param {Blockly.WorkspaceComment} comment The comment this event corresponds * to. * @extends {Blockly.Events.Abstract} * @constructor */ constructor(comment: Blockly.WorkspaceComment); /** * The ID of the comment this event pertains to. * @type {string} */ commentId: string; /** * The workspace identifier for this event. * @type {string} */ workspaceId: string; /** * The event group id for the group this event belongs to. Groups define * events that should be treated as an single action from the user's * perspective, and should be undone together. * @type {string} */ group: string; /** * Sets whether the event should be added to the undo stack. * @type {boolean} */ recordUndo: boolean; /** * Encode the event as JSON. * @return {!Object} JSON representation. */ toJson(): Object; /** * Decode the JSON event. * @param {!Object} json JSON representation. */ fromJson(json: Object): void; } class CommentChange extends CommentChange__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class CommentChange__Class extends Blockly.Events.CommentBase__Class { /** * Class for a comment change event. * @param {Blockly.WorkspaceComment} comment The comment that is being changed. * Null for a blank event. * @param {string} oldContents Previous contents of the comment. * @param {string} newContents New contents of the comment. * @extends {Blockly.Events.CommentBase} * @constructor */ constructor(comment: Blockly.WorkspaceComment, oldContents: string, newContents: string); /** * Type of this event. * @type {string} */ type: string; /** * Encode the event as JSON. * @return {!Object} JSON representation. */ toJson(): Object; /** * Decode the JSON event. * @param {!Object} json JSON representation. */ fromJson(json: Object): void; /** * Does this event record any change of state? * @return {boolean} False if something changed. */ isNull(): boolean; /** * Run a change event. * @param {boolean} forward True if run forward, false if run backward (undo). */ run(forward: boolean): void; } class CommentCreate extends CommentCreate__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class CommentCreate__Class extends Blockly.Events.CommentBase__Class { /** * Class for a comment creation event. * @param {Blockly.WorkspaceComment} comment The created comment. * Null for a blank event. * @extends {Blockly.Events.CommentBase} * @constructor */ constructor(comment: Blockly.WorkspaceComment); /** * Type of this event. * @type {string} */ type: string; /** * Encode the event as JSON. * @return {!Object} JSON representation. */ toJson(): Object; /** * Decode the JSON event. * @param {!Object} json JSON representation. */ fromJson(json: Object): void; /** * Run a creation event. * @param {boolean} forward True if run forward, false if run backward (undo). 
*/ run(forward: boolean): void; } class CommentDelete extends CommentDelete__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class CommentDelete__Class extends Blockly.Events.CommentBase__Class { /** * Class for a comment deletion event. * @param {Blockly.WorkspaceComment} comment The deleted comment. * Null for a blank event. * @extends {Blockly.Events.CommentBase} * @constructor */ constructor(comment: Blockly.WorkspaceComment); /** * Type of this event. * @type {string} */ type: string; /** * Encode the event as JSON. * @return {!Object} JSON representation. */ toJson(): Object; /** * Decode the JSON event. * @param {!Object} json JSON representation. */ fromJson(json: Object): void; /** * Run a creation event. * @param {boolean} forward True if run forward, false if run backward (undo). */ run(forward: boolean): void; } class CommentMove extends CommentMove__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class CommentMove__Class extends Blockly.Events.CommentBase__Class { /** * Class for a comment move event. Created before the move. * @param {Blockly.WorkspaceComment} comment The comment that is being moved. * Null for a blank event. * @extends {Blockly.Events.CommentBase} * @constructor */ constructor(comment: Blockly.WorkspaceComment); /** * The comment that is being moved. Will be cleared after recording the new * location. * @type {Blockly.WorkspaceComment} */ comment_: Blockly.WorkspaceComment; /** * The location before the move, in workspace coordinates. * @type {!Blockly.utils.Coordinate} */ oldCoordinate_: Blockly.utils.Coordinate; /** * The location after the move, in workspace coordinates. * @type {Blockly.utils.Coordinate} */ newCoordinate_: Blockly.utils.Coordinate; /** * Record the comment's new location. Called after the move. Can only be * called once. */ recordNew(): void; /** * Type of this event. * @type {string} */ type: string; /** * Override the location before the move. Use this if you don't create the * event until the end of the move, but you know the original location. * @param {!Blockly.utils.Coordinate} xy The location before the move, * in workspace coordinates. */ setOldCoordinate(xy: Blockly.utils.Coordinate): void; /** * Encode the event as JSON. * @return {!Object} JSON representation. */ toJson(): Object; /** * Decode the JSON event. * @param {!Object} json JSON representation. */ fromJson(json: Object): void; /** * Does this event record any change of state? * @return {boolean} False if something changed. */ isNull(): boolean; /** * Run a move event. * @param {boolean} forward True if run forward, false if run backward (undo). */ run(forward: boolean): void; } /** * Helper function for Comment[Create|Delete] * @param {!Blockly.Events.CommentCreate|!Blockly.Events.CommentDelete} event * The event to run. * @param {boolean} create if True then Create, if False then Delete */ function CommentCreateDeleteHelper(event: Blockly.Events.CommentCreate|Blockly.Events.CommentDelete, create: boolean): void; } declare module Blockly.Xml { /** * Encode a block tree as XML. * @param {!Blockly.Workspace} workspace The workspace containing blocks. * @param {boolean=} opt_noId True if the encoder should skip the block IDs. * @return {!Element} XML document. */ function workspaceToDom(workspace: Blockly.Workspace, opt_noId?: boolean): Element; /** * Encode a list of variables as XML. * @param {!Array.<!Blockly.VariableModel>} variableList List of all variable * models. 
* @return {!Element} List of XML elements. */ function variablesToDom(variableList: Blockly.VariableModel[]): Element; /** * Encode a block subtree as XML with XY coordinates. * @param {!Blockly.Block} block The root block to encode. * @param {boolean=} opt_noId True if the encoder should skip the block ID. * @return {!Element} Tree of XML elements. */ function blockToDomWithXY(block: Blockly.Block, opt_noId?: boolean): Element; /** * Encode a block subtree as XML. * @param {!Blockly.Block} block The root block to encode. * @param {boolean=} opt_noId True if the encoder should skip the block ID. * @return {!Element} Tree of XML elements. */ function blockToDom(block: Blockly.Block, opt_noId?: boolean): Element; /** * Converts a DOM structure into plain text. * Currently the text format is fairly ugly: all one line with no whitespace, * unless the DOM itself has whitespace built-in. * @param {!Element} dom A tree of XML elements. * @return {string} Text representation. */ function domToText(dom: Element): string; /** * Converts a DOM structure into properly indented text. * @param {!Element} dom A tree of XML elements. * @return {string} Text representation. */ function domToPrettyText(dom: Element): string; /** * Converts an XML string into a DOM structure. * @param {string} text An XML string. * @return {!Element} A DOM object representing the singular child of the * document element. * @throws if the text doesn't parse. */ function textToDom(text: string): Element; /** * Clear the given workspace then decode an XML DOM and * create blocks on the workspace. * @param {!Element} xml XML DOM. * @param {!Blockly.Workspace} workspace The workspace. * @return {Array.<string>} An array containing new block ids. */ function clearWorkspaceAndLoadFromXml(xml: Element, workspace: Blockly.Workspace): string[]; /** * Decode an XML DOM and create blocks on the workspace. * @param {!Element} xml XML DOM. * @param {!Blockly.Workspace} workspace The workspace. * @return {!Array.<string>} An array containing new block IDs. */ function domToWorkspace(xml: Element, workspace: Blockly.Workspace): string[]; /** * Decode an XML DOM and create blocks on the workspace. Position the new * blocks immediately below prior blocks, aligned by their starting edge. * @param {!Element} xml The XML DOM. * @param {!Blockly.Workspace} workspace The workspace to add to. * @return {Array.<string>} An array containing new block IDs. */ function appendDomToWorkspace(xml: Element, workspace: Blockly.Workspace): string[]; /** * Decode an XML block tag and create a block (and possibly sub blocks) on the * workspace. * @param {!Element} xmlBlock XML block element. * @param {!Blockly.Workspace} workspace The workspace. * @return {!Blockly.Block} The root block created. */ function domToBlock(xmlBlock: Element, workspace: Blockly.Workspace): Blockly.Block; /** * Decode an XML list of variables and add the variables to the workspace. * @param {!Element} xmlVariables List of XML variable elements. * @param {!Blockly.Workspace} workspace The workspace to which the variable * should be added. */ function domToVariables(xmlVariables: Element, workspace: Blockly.Workspace): void; /** * Remove any 'next' block (statements in a stack). * @param {!Element} xmlBlock XML block element. 
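* A hedged usage sketch (blockToDom and domToBlock are declared above in this module): * var dom = Blockly.Xml.blockToDom(block); * Blockly.Xml.deleteNext(dom); // keep this block, drop the stack attached below it * var copy = Blockly.Xml.domToBlock(dom, workspace);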
*/ function deleteNext(xmlBlock: Element): void; } declare module Blockly { class ZoomControls extends ZoomControls__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class ZoomControls__Class { /** * Class for a zoom controls. * @param {!Blockly.WorkspaceSvg} workspace The workspace to sit in. * @constructor */ constructor(workspace: Blockly.WorkspaceSvg); /** * Create the zoom controls. * @return {!SVGElement} The zoom controls SVG group. */ createDom(): SVGElement; /** * Initialize the zoom controls. * @param {number} verticalSpacing Vertical distances from workspace edge to the * same edge of the controls. * @return {number} Vertical distance from workspace edge to the opposite * edge of the controls. */ init(verticalSpacing: number): number; /** * Dispose of this zoom controls. * Unlink from all DOM elements to prevent memory leaks. */ dispose(): void; /** * Position the zoom controls. * It is positioned in the opposite corner to the corner the * categories/toolbox starts at. */ position(): void; } } declare module Blockly { class Component extends Component__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Component__Class { /** * Default implementation of a UI component. * Similar to Closure's goog.ui.Component. * * @constructor */ constructor(); /** * Gets the unique ID for the instance of this component. If the instance * doesn't already have an ID, generates one on the fly. * @return {string} Unique component ID. * @package */ getId(): string; /** * Gets the component's element. * @return {Element} The element for the component. * @package */ getElement(): Element; /** * Sets the component's root element to the given element. Considered * protected and final. * * This should generally only be called during createDom. Setting the element * does not actually change which element is rendered, only the element that is * associated with this UI component. * * This should only be used by subclasses and its associated renderers. * * @param {Element} element Root element for the component. * @protected */ setElementInternal(element: Element): void; /** * Sets the parent of this component to use for event bubbling. Throws an error * if the component already has a parent or if an attempt is made to add a * component to itself as a child. * @param {Blockly.Component} parent The parent component. * @protected */ setParent(parent: Blockly.Component): void; /** * Returns the component's parent, if any. * @return {?Blockly.Component} The parent component. * @protected */ getParent(): Blockly.Component; /** * Determines whether the component has been added to the document. * @return {boolean} TRUE if rendered. Otherwise, FALSE. * @protected */ isInDocument(): boolean; /** * Creates the initial DOM representation for the component. The default * implementation is to set this.element_ = div. * @protected */ createDom(): void; /** * Renders the component. If a parent element is supplied, the component's * element will be appended to it. If there is no optional parent element and * the element doesn't have a parentNode then it will be appended to the * document body. * * If this component has a parent component, and the parent component is * not in the document already, then this will not call `enterDocument` * on this component. * * Throws an Error if the component is already rendered. * * @param {Element=} opt_parentElement Optional parent element to render the * component into. 
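* A hedged sketch (the container id is illustrative): * component.render(document.getElementById('my-container'));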
* @package */ render(opt_parentElement?: Element): void; /** * Renders the component before another element. The other element should be in * the document already. * * Throws an Error if the component is already rendered. * * @param {Node} sibling Node to render the component before. * @protected */ renderBefore(sibling: Node): void; /** * Called when the component's element is known to be in the document. Anything * using document.getElementById etc. should be done at this stage. * * If the component contains child components, this call is propagated to its * children. * @protected */ enterDocument(): void; /** * Called by dispose to clean up the elements and listeners created by a * component, or by a parent component/application who has removed the * component from the document but wants to reuse it later. * * If the component contains child components, this call is propagated to its * children. * * It should be possible for the component to be rendered again once this method * has been called. * @protected */ exitDocument(): void; /** * Disposes of the object. If the object hasn't already been disposed of, calls * {@link #disposeInternal}. * @protected */ dispose(): void; /** * Disposes of the component. Calls `exitDocument`, which is expected to * remove event handlers and clean up the component. Propagates the call to * the component's children, if any. Removes the component's DOM from the * document. * @protected */ disposeInternal(): void; /** * Adds the specified component as the last child of this component. See * {@link Blockly.Component#addChildAt} for detailed semantics. * * @see Blockly.Component#addChildAt * @param {Blockly.Component} child The new child component. * @param {boolean=} opt_render If true, the child component will be rendered * into the parent. * @package */ addChild(child: Blockly.Component, opt_render?: boolean): void; /** * Adds the specified component as a child of this component at the given * 0-based index. * * Both `addChild` and `addChildAt` assume the following contract * between parent and child components: * <ul> * <li>the child component's element must be a descendant of the parent * component's element, and * <li>the DOM state of the child component must be consistent with the DOM * state of the parent component (see `isInDocument`) in the * steady state -- the exception is to addChildAt(child, i, false) and * then immediately decorate/render the child. * </ul> * * In particular, `parent.addChild(child)` will throw an error if the * child component is already in the document, but the parent isn't. * * Clients of this API may call `addChild` and `addChildAt` with * `opt_render` set to true. If `opt_render` is true, calling these * methods will automatically render the child component's element into the * parent component's element. If the parent does not yet have an element, then * `createDom` will automatically be invoked on the parent before * rendering the child. * * Invoking {@code parent.addChild(child, true)} will throw an error if the * child component is already in the document, regardless of the parent's DOM * state. * * If `opt_render` is true and the parent component is not already * in the document, `enterDocument` will not be called on this component * at this point. * * Finally, this method also throws an error if the new child already has a * different parent, or the given index is out of bounds. * * @see Blockly.Component#addChild * @param {Blockly.Component} child The new child component. 
* @param {number} index 0-based index at which the new child component is to be * added; must be between 0 and the current child count (inclusive). * @param {boolean=} opt_render If true, the child component will be rendered * into the parent. * @protected */ addChildAt(child: Blockly.Component, index: number, opt_render?: boolean): void; /** * Returns the DOM element into which child components are to be rendered, * or null if the component itself hasn't been rendered yet. This default * implementation returns the component's root element. Subclasses with * complex DOM structures must override this method. * @return {Element} Element to contain child elements (null if none). * @protected */ getContentElement(): Element; /** * Returns true if the component is rendered right-to-left, false otherwise. * The first time this function is invoked, the right-to-left rendering property * is set if it has not been already. * @return {boolean} Whether the control is rendered right-to-left. * @protected */ isRightToLeft(): boolean; /** * Set is right-to-left. This function should be used if the component needs * to know the rendering direction during DOM creation (i.e. before * {@link #enterDocument} is called and is right-to-left is set). * @param {boolean} rightToLeft Whether the component is rendered * right-to-left. * @package */ setRightToLeft(rightToLeft: boolean): void; /** * Returns true if the component has children. * @return {boolean} True if the component has children. * @protected */ hasChildren(): boolean; /** * Returns the number of children of this component. * @return {number} The number of children. * @protected */ getChildCount(): number; /** * Returns the child with the given ID, or null if no such child exists. * @param {string} id Child component ID. * @return {?Blockly.Component} The child with the given ID; null if none. * @protected */ getChild(id: string): Blockly.Component; /** * Returns the child at the given index, or null if the index is out of bounds. * @param {number} index 0-based index. * @return {?Blockly.Component} The child at the given index; null if none. * @protected */ getChildAt(index: number): Blockly.Component; /** * Calls the given function on each of this component's children in order. If * `opt_obj` is provided, it will be used as the 'this' object in the * function when called. The function should take two arguments: the child * component and its 0-based index. The return value is ignored. * @param {function(this:T,?,number):?} f The function to call for every * child component; should take 2 arguments (the child and its index). * @param {T=} opt_obj Used as the 'this' object in f when called. * @template T * @protected */ forEachChild<T>(f: { (_0: any, _1: number): any }, opt_obj?: T): void; /** * Returns the 0-based index of the given child component, or -1 if no such * child is found. * @param {?Blockly.Component} child The child component. * @return {number} 0-based index of the child component; -1 if not found. * @protected */ indexOfChild(child: Blockly.Component): number; } } declare module Blockly.Component { /** * The default right to left value. * @type {boolean} * @package */ var defaultRightToLeft: boolean; /** * Errors thrown by the component. 
* @enum {string} */ enum Error { ALREADY_RENDERED, PARENT_UNABLE_TO_BE_SET, CHILD_INDEX_OUT_OF_BOUNDS } } declare module Blockly { class Action extends Action__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Action__Class { /** * Class for a single action. * An action describes user intent. (ex go to next or go to previous) * @param {string} name The name of the action. * @param {string} desc The description of the action. * @constructor */ constructor(name: string, desc: string); } } declare module Blockly { class ASTNode extends ASTNode__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class ASTNode__Class { /** * Class for an AST node. * It is recommended that you use one of the createNode methods instead of * creating a node directly. * @param {string} type The type of the location. * Must be in Bockly.ASTNode.types. * @param {Blockly.Block|Blockly.Connection|Blockly.Field|Blockly.Workspace} * location The position in the AST. * @param {!Object=} opt_params Optional dictionary of options. * @constructor */ constructor(type: string, location: Blockly.Block|Blockly.Connection|Blockly.Field|Blockly.Workspace, opt_params?: Object); /** * Gets the value pointed to by this node. * It is the callers responsibility to check the node type to figure out what * type of object they get back from this. * @return {!(Blockly.Field|Blockly.Connection|Blockly.Block|Blockly.Workspace)} * The current field, connection, workspace, or block the cursor is on. */ getLocation(): Blockly.Field|Blockly.Connection|Blockly.Block|Blockly.Workspace; /** * The type of the current location. * One of Blockly.ASTNode.types * @return {string} The type of the location. */ getType(): string; /** * The coordinate on the workspace. * @return {Blockly.utils.Coordinate} The workspace coordinate or null if the * location is not a workspace. */ getWsCoordinate(): Blockly.utils.Coordinate; /** * Whether the node points to a connection. * @return {boolean} [description] * @package */ isConnection(): boolean; /** * Find the element to the right of the current element in the AST. * @return {Blockly.ASTNode} An AST node that wraps the next field, connection, * block, or workspace. Or null if there is no node to the right. */ next(): Blockly.ASTNode; /** * Find the element one level below and all the way to the left of the current * location. * @return {Blockly.ASTNode} An AST node that wraps the next field, connection, * workspace, or block. Or null if there is nothing below this node. */ in(): Blockly.ASTNode; /** * Find the element to the left of the current element in the AST. * @return {Blockly.ASTNode} An AST node that wraps the previous field, * connection, workspace or block. Or null if no node exists to the left. * null. */ prev(): Blockly.ASTNode; /** * Find the next element that is one position above and all the way to the left * of the current location. * @return {Blockly.ASTNode} An AST node that wraps the next field, connection, * workspace or block. Or null if we are at the workspace level. */ out(): Blockly.ASTNode; } } declare module Blockly.ASTNode { /** * Object holding different types for an AST node. * @enum {string} */ enum types { FIELD, BLOCK, INPUT, OUTPUT, NEXT, PREVIOUS, STACK, WORKSPACE } /** * Create an AST node pointing to a field. * @param {!Blockly.Field} field The location of the AST node. * @return {!Blockly.ASTNode} An AST node pointing to a field. 
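     * An illustrative sketch only; it assumes `block` is an existing
     * Blockly.Block and 'NUM' is the (hypothetical) name of one of its fields:
     * @example
     * var field = block.getField('NUM');  // field name is an assumption
     * var fieldNode = Blockly.ASTNode.createFieldNode(field);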
*/ function createFieldNode(field: Blockly.Field): Blockly.ASTNode; /** * Creates an AST node pointing to a connection. If the connection has a parent * input then create an AST node of type input that will hold the connection. * @param {Blockly.Connection} connection This is the connection the node will * point to. * @return {Blockly.ASTNode} An AST node pointing to a connection. */ function createConnectionNode(connection: Blockly.Connection): Blockly.ASTNode; /** * Creates an AST node pointing to an input. Stores the input connection as the * location. * @param {Blockly.Input} input The input used to create an AST node. * @return {!Blockly.ASTNode} An AST node pointing to a input. */ function createInputNode(input: Blockly.Input): Blockly.ASTNode; /** * Creates an AST node pointing to a block. * @param {!Blockly.Block} block The block used to create an AST node. * @return {!Blockly.ASTNode} An AST node pointing to a block. */ function createBlockNode(block: Blockly.Block): Blockly.ASTNode; /** * Create an AST node of type stack. A stack, represented by its top block, is * the set of all blocks connected to a top block, including the top block. * @param {!Blockly.Block} topBlock A top block has no parent and can be found * in the list returned by workspace.getTopBlocks(). * @return {!Blockly.ASTNode} An AST node of type stack that points to the top * block on the stack. */ function createStackNode(topBlock: Blockly.Block): Blockly.ASTNode; /** * Creates an AST node pointing to a workspace. * @param {!Blockly.Workspace} workspace The workspace that we are on. * @param {Blockly.utils.Coordinate} wsCoordinate The position on the workspace * for this node. * @return {!Blockly.ASTNode} An AST node pointing to a workspace and a position * on the workspace. */ function createWorkspaceNode(workspace: Blockly.Workspace, wsCoordinate: Blockly.utils.Coordinate): Blockly.ASTNode; } declare module Blockly { class Cursor extends Cursor__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Cursor__Class { /** * Class for a cursor. * A cursor controls how a user navigates the Blockly AST. * @constructor */ constructor(); /** * Sets the object in charge of drawing the cursor. * @param {Blockly.CursorSvg} drawer The object in charge of drawing the cursor. */ setDrawer(drawer: Blockly.CursorSvg): void; /** * Get the current drawer for the cursor. * @return {Blockly.CursorSvg} The object in charge of drawing the cursor. */ getDrawer(): Blockly.CursorSvg; /** * Gets the current location of the cursor. * @return {Blockly.ASTNode} The current field, connection, or block the cursor * is on. */ getCurNode(): Blockly.ASTNode; /** * Set the location of the cursor and call the update method. * Setting isStack to true will only work if the newLocation is the top most * output or previous connection on a stack. * @param {Blockly.ASTNode} newNode The new location of the cursor. */ setCurNode(newNode: Blockly.ASTNode): void; /** * Hide the cursor SVG. */ hide(): void; /** * Find the next connection, field, or block. * @return {Blockly.ASTNode} The next element, or null if the current node is * not set or there is no next value. */ next(): Blockly.ASTNode; /** * Find the in connection or field. * @return {Blockly.ASTNode} The in element, or null if the current node is * not set or there is no in value. */ in(): Blockly.ASTNode; /** * Find the previous connection, field, or block. 
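     * Illustrative only; assumes `cursor` is an existing Blockly.Cursor whose
     * current node has already been set via setCurNode:
     * @example
     * var previousNode = cursor.prev();  // null if there is nothing to the left
     * if (previousNode) { console.log(previousNode.getType()); }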
* @return {Blockly.ASTNode} The previous element, or null if the current node * is not set or there is no previous value. */ prev(): Blockly.ASTNode; /** * Find the out connection, field, or block. * @return {Blockly.ASTNode} The out element, or null if the current node is * not set or there is no out value. */ out(): Blockly.ASTNode; } } declare module Blockly { class CursorSvg extends CursorSvg__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class CursorSvg__Class { /** * Class for a cursor. * @param {!Blockly.WorkspaceSvg} workspace The workspace the cursor belongs to. * @param {boolean=} opt_marker True if the cursor is a marker. A marker is used * to save a location and is an immovable cursor. False or undefined if the * cursor is not a marker. * @constructor */ constructor(workspace: Blockly.WorkspaceSvg, opt_marker?: boolean); /** * The current SVG element for the cursor. * @type {Element} */ currentCursorSvg: Element; /** * Return the root node of the SVG or null if none exists. * @return {SVGElement} The root SVG node. */ getSvgRoot(): SVGElement; /** * Create the DOM element for the cursor. * @return {!SVGElement} The cursor controls SVG group. * @package */ createDom(): SVGElement; /** * Position the cursor for a block. * Displays an outline of the top half of a rectangle around a block. * @param {number} width The width of the block. * @param {number} cursorOffset The extra padding for around the block. * @param {number} cursorHeight The height of the cursor. */ positionBlock_(width: number, cursorOffset: number, cursorHeight: number): void; /** * Hide the cursor. * @package */ hide(): void; /** * Update the cursor. * @param {Blockly.ASTNode} curNode The node that we want to draw the cursor for. * @package */ draw(curNode: Blockly.ASTNode): void; /** * Dispose of this cursor. * @package */ dispose(): void; } } declare module Blockly.CursorSvg { /** * Height of the horizontal cursor. * @type {number} * @const */ var CURSOR_HEIGHT: number; /** * Width of the horizontal cursor. * @type {number} * @const */ var CURSOR_WIDTH: number; /** * The start length of the notch. * @type {number} * @const */ var NOTCH_START_LENGTH: number; /** * Padding around the input. * @type {number} * @const */ var VERTICAL_PADDING: number; /** * Padding around a stack. * @type {number} * @const */ var STACK_PADDING: number; /** * Padding around a block. * @type {number} * @const */ var BLOCK_PADDING: number; /** * What we multiply the height by to get the height of the cursor. * Only used for the block and block connections. * @type {number} * @const */ var HEIGHT_MULTIPLIER: number; /** * Cursor color. * @type {string} * @const */ var CURSOR_COLOR: string; /** * Immovable marker color. * @type {string} * @const */ var MARKER_COLOR: string; /** * The name of the CSS class for a cursor. * @const {string} */ var CURSOR_CLASS: any /*missing*/; /** * The name of the CSS class for a marker. * @const {string} */ var MARKER_CLASS: any /*missing*/; } declare module Blockly { class FlyoutCursor extends FlyoutCursor__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class FlyoutCursor__Class extends Blockly.Cursor__Class { /** * Class for a flyout cursor. * This controls how a user navigates blocks in the flyout. * @constructor * @extends {Blockly.Cursor} */ constructor(); } } declare module Blockly.user.keyMap { /** * Holds the serialized key to key action mapping. 
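     * Entries are added with the helpers declared below; an illustrative (and
     * hypothetical) binding built only from identifiers declared in this file:
     * @example
     * var key = Blockly.user.keyMap.createSerializedKey(
     *     Blockly.utils.KeyCodes.W, [Blockly.user.keyMap.modifierKeys.SHIFT]);
     * Blockly.user.keyMap.setActionForKey(key, Blockly.navigation.ACTION_PREVIOUS);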
* @type {Object<string, Blockly.Action>} */ var map_: { [key: string]: Blockly.Action }; /** * Object holding valid modifiers. * @enum {string} */ enum modifierKeys { SHIFT, CONTROL, ALT, META } /** * Update the key map to contain the new action. * @param {string} keyCode The key code serialized by the serializeKeyEvent. * @param {!Blockly.Action} action The action to be executed when the keys * corresponding to the serialized key code is pressed. * @package */ function setActionForKey(keyCode: string, action: Blockly.Action): void; /** * Creates a new key map. * @param {Object<string, Blockly.Action>} keyMap The object holding the key * to action mapping. * @package */ function setKeyMap(keyMap: { [key: string]: Blockly.Action }): void; /** * Gets the current key map. * @return {Object<string,Blockly.Action>} The object holding the key to * action mapping. * @package */ function getKeyMap(): { [key: string]: Blockly.Action }; /** * Get the action by the serialized key code. * @param {string} keyCode The serialized key code. * @return {Blockly.Action|undefined} The action holding the function to * call when the given keyCode is used or undefined if no action exists. * @package */ function getActionByKeyCode(keyCode: string): Blockly.Action|any /*undefined*/; /** * Get the serialized key that corresponds to the action. * @param {!Blockly.Action} action The action for which we want to get * the key. * @return {?string} The serialized key or null if the action does not have * a key mapping. * @package */ function getKeyByAction(action: Blockly.Action): string; /** * Serialize the key event. * @param {!Event} e A key up event holding the key code. * @return {string} A string containing the serialized key event. */ function serializeKeyEvent(e: Event): string; /** * Create the serialized key code that will be used in the key map. * @param {number} keyCode Number code representing the key. * @param {!Array.<string>} modifiers List of modifiers to be used with the key. * All valid modifiers can be found in the Blockly.user.keyMap.modifierKeys. * @return {string} The serialized key code for the given modifiers and key. */ function createSerializedKey(keyCode: number, modifiers: string[]): string; /** * Creates the default key map. * @return {!Object<string,Blockly.Action>} An object holding the default key * to action mapping. */ function createDefaultKeyMap(): { [key: string]: Blockly.Action }; } declare module Blockly { class MarkerCursor extends MarkerCursor__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class MarkerCursor__Class extends Blockly.Cursor__Class { /** * Class for a marker. * This is used in keyboard navigation to save a location in the Blockly AST. * @constructor * @extends {Blockly.Cursor} */ constructor(); } } declare module Blockly.navigation { /** * A function to call to give feedback to the user about logs, warnings, and * errors. You can override this to customize feedback (e.g. warning sounds, * reading out the warning text, etc). * Null by default. * The first argument is one of 'log', 'warn', and 'error'. * The second argument is the message. * @type {?function(string, string)} * @public */ var loggingCallback: { (_0: string, _1: string): any /*missing*/ }; /** * State indicating focus is currently on the flyout. * @type {number} */ var STATE_FLYOUT: number; /** * State indicating focus is currently on the workspace. * @type {number} */ var STATE_WS: number; /** * State indicating focus is currently on the toolbox. 
* @type {number} */ var STATE_TOOLBOX: number; /** * Object holding default action names. * @enum {string} */ enum actionNames { PREVIOUS, NEXT, IN, OUT, INSERT, MARK, DISCONNECT, TOOLBOX, EXIT, TOGGLE_KEYBOARD_NAV } /** * If there is a marked connection try connecting the block from the flyout to * that connection. If no connection has been marked then inserting will place * it on the workspace. */ function insertFromFlyout(): void; /** * Tries to connect the given block to the destination connection, making an * intelligent guess about which connection to use to on the moving block. * @param {!Blockly.Block} block The block to move. * @param {Blockly.Connection} destConnection The connection to connect to. * @return {boolean} Whether the connection was successful. */ function insertBlock(block: Blockly.Block, destConnection: Blockly.Connection): boolean; /** * Set the current navigation state. * @param {number} newState The new navigation state. * @package */ function setState(newState: number): void; /** * Gets the top node on a block. * This is either the previous connection, output connection or the block. * @param {!Blockly.Block} block The block to find the top most AST node on. * @return {Blockly.ASTNode} The AST node holding the top most node on the * block. * @package */ function getTopNode(block: Blockly.Block): Blockly.ASTNode; /** * Before a block is deleted move the cursor to the appropriate position. * @param {!Blockly.Block} deletedBlock The block that is being deleted. */ function moveCursorOnBlockDelete(deletedBlock: Blockly.Block): void; /** * When a block that the cursor is on is mutated move the cursor to the block * level. * @param {!Blockly.Block} mutatedBlock The block that is being mutated. * @package */ function moveCursorOnBlockMutation(mutatedBlock: Blockly.Block): void; /** * Enable accessibility mode. */ function enableKeyboardAccessibility(): void; /** * Disable accessibility mode. */ function disableKeyboardAccessibility(): void; /** * Handler for all the keyboard navigation events. * @param {!Event} e The keyboard event. * @return {boolean} True if the key was handled false otherwise. */ function onKeyPress(e: Event): boolean; /** * Execute any actions on the flyout, workspace, or toolbox that correspond to * the given action. * @param {!Blockly.Action} action The current action. * @return {boolean} True if the action has been handled, false otherwise. */ function onBlocklyAction(action: Blockly.Action): boolean; /** * The previous action. * @type {!Blockly.Action} */ var ACTION_PREVIOUS: Blockly.Action; /** * The out action. * @type {!Blockly.Action} */ var ACTION_OUT: Blockly.Action; /** * The next action. * @type {!Blockly.Action} */ var ACTION_NEXT: Blockly.Action; /** * The in action. * @type {!Blockly.Action} */ var ACTION_IN: Blockly.Action; /** * The action to try to insert a block. * @type {!Blockly.Action} */ var ACTION_INSERT: Blockly.Action; /** * The action to mark a certain location. * @type {!Blockly.Action} */ var ACTION_MARK: Blockly.Action; /** * The action to disconnect a block. * @type {!Blockly.Action} */ var ACTION_DISCONNECT: Blockly.Action; /** * The action to open the toolbox. * @type {!Blockly.Action} */ var ACTION_TOOLBOX: Blockly.Action; /** * The action to exit the toolbox or flyout. * @type {!Blockly.Action} */ var ACTION_EXIT: Blockly.Action; /** * The action to toggle keyboard navigation mode on and off. 
* @type {!Blockly.Action} */ var ACTION_TOGGLE_KEYBOARD_NAV: Blockly.Action; /** * List of actions that can be performed in read only mode. * @type {!Array.<!Blockly.Action>} */ var READONLY_ACTION_LIST: Blockly.Action[]; } declare module Blockly.utils.aria { /** * ARIA role values. * Copied from Closure's goog.a11y.aria.Role * @enum {string} */ enum Role { ALERT, ALERTDIALOG, APPLICATION, ARTICLE, BANNER, BUTTON, CHECKBOX, COLUMNHEADER, COMBOBOX, COMPLEMENTARY, CONTENTINFO, DEFINITION, DIALOG, DIRECTORY, DOCUMENT, FORM, GRID, GRIDCELL, GROUP, HEADING, IMG, LINK, LIST, LISTBOX, LISTITEM, LOG, MAIN, MARQUEE, MATH, MENU, MENUBAR, MENUITEM, MENUITEMCHECKBOX, MENUITEMRADIO, NAVIGATION, NOTE, OPTION, PRESENTATION, PROGRESSBAR, RADIO, RADIOGROUP, REGION, ROW, ROWGROUP, ROWHEADER, SCROLLBAR, SEARCH, SEPARATOR, SLIDER, SPINBUTTON, STATUS, TAB, TABLE, TABLIST, TABPANEL, TEXTBOX, TEXTINFO, TIMER, TOOLBAR, TOOLTIP, TREE, TREEGRID, TREEITEM } /** * ARIA states and properties. * Copied from Closure's goog.a11y.aria.State * @enum {string} */ enum State { ACTIVEDESCENDANT, ATOMIC, AUTOCOMPLETE, BUSY, CHECKED, COLINDEX, CONTROLS, DESCRIBEDBY, DISABLED, DROPEFFECT, EXPANDED, FLOWTO, GRABBED, HASPOPUP, HIDDEN, INVALID, LABEL, LABELLEDBY, LEVEL, LIVE, MULTILINE, MULTISELECTABLE, ORIENTATION, OWNS, POSINSET, PRESSED, READONLY, RELEVANT, REQUIRED, ROWINDEX, SELECTED, SETSIZE, SORT, VALUEMAX, VALUEMIN, VALUENOW, VALUETEXT } /** * Sets the role of an element. If the roleName is * empty string or null, the role for the element is removed. * We encourage clients to call the goog.a11y.aria.removeRole * method instead of setting null and empty string values. * Special handling for this case is added to ensure * backword compatibility with existing code. * * Similar to Closure's goog.a11y.aria * * @param {!Element} element DOM node to set role of. * @param {!Blockly.utils.aria.Role|string} roleName role name(s). */ function setRole(element: Element, roleName: Blockly.utils.aria.Role|string): void; /** * Gets role of an element. * Copied from Closure's goog.a11y.aria * @param {!Element} element DOM element to get role of. * @return {?Blockly.utils.aria.Role} ARIA Role name. */ function getRole(element: Element): Blockly.utils.aria.Role; /** * Removes role of an element. * Copied from Closure's goog.a11y.aria * @param {!Element} element DOM element to remove the role from. */ function removeRole(element: Element): void; /** * Sets the state or property of an element. * Copied from Closure's goog.a11y.aria * @param {!Element} element DOM node where we set state. * @param {!(Blockly.utils.aria.State|string)} stateName State attribute being set. * Automatically adds prefix 'aria-' to the state name if the attribute is * not an extra attribute. * @param {string|boolean|number|!Array.<string>} value Value * for the state attribute. */ function setState(element: Element, stateName: Blockly.utils.aria.State|string, value: string|boolean|number|string[]): void; } declare module Blockly.utils.colour { /** * Parses a colour from a string. * .parse('red') -> '#ff0000' * .parse('#f00') -> '#ff0000' * .parse('#ff0000') -> '#ff0000' * .parse('rgb(255, 0, 0)') -> '#ff0000' * @param {string} str Colour in some CSS format. * @return {string|null} A string containing a hex representation of the colour, * or null if can't be parsed. */ function parse(str: string): string|any /*null*/; /** * Converts a colour from RGB to hex representation. * @param {number} r Amount of red, int between 0 and 255. 
* @param {number} g Amount of green, int between 0 and 255. * @param {number} b Amount of blue, int between 0 and 255. * @return {string} Hex representation of the colour. */ function rgbToHex(r: number, g: number, b: number): string; /** * Converts a hex representation of a colour to RGB. * @param {string} hexColor Colour in '#ff0000' format. * @return {!Array.<number>} RGB representation of the colour. */ function hexToRgb(hexColor: string): number[]; /** * Converts an HSV triplet to hex representation. * @param {number} h Hue value in [0, 360]. * @param {number} s Saturation value in [0, 1]. * @param {number} v Brightness in [0, 255]. * @return {string} Hex representation of the colour. */ function hsvToHex(h: number, s: number, v: number): string; /** * Blend two colours together, using the specified factor to indicate the * weight given to the first colour. * @param {string} colour1 First colour. * @param {string} colour2 Second colour. * @param {number} factor The weight to be given to colour1 over colour2. * Values should be in the range [0, 1]. * @return {string} Combined colour represented in hex. */ function blend(colour1: string, colour2: string, factor: number): string; /** * A map that contains the 16 basic colour keywords as defined by W3C: * https://www.w3.org/TR/2018/REC-css-color-3-20180619/#html4 * The keys of this map are the lowercase "readable" names of the colours, * while the values are the "hex" values. * * @type {!Object<string, string>} */ var names: { [key: string]: string }; } declare module Blockly.utils { class Coordinate extends Coordinate__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Coordinate__Class { /** * Class for representing coordinates and positions. * @param {number} x Left. * @param {number} y Top. * @struct * @constructor */ constructor(x: number, y: number); /** * X-value * @type {number} */ x: number; /** * Y-value * @type {number} */ y: number; /** * Scales this coordinate by the given scale factor. * @param {number} s The scale factor to use for both x and y dimensions. * @return {!Blockly.utils.Coordinate} This coordinate after scaling. */ scale(s: number): Blockly.utils.Coordinate; /** * Translates this coordinate by the given offsets. * respectively. * @param {number} tx The value to translate x by. * @param {number} ty The value to translate y by. * @return {!Blockly.utils.Coordinate} This coordinate after translating. */ translate(tx: number, ty: number): Blockly.utils.Coordinate; } } declare module Blockly.utils.Coordinate { /** * Compares coordinates for equality. * @param {Blockly.utils.Coordinate} a A Coordinate. * @param {Blockly.utils.Coordinate} b A Coordinate. * @return {boolean} True iff the coordinates are equal, or if both are null. */ function equals(a: Blockly.utils.Coordinate, b: Blockly.utils.Coordinate): boolean; /** * Returns the distance between two coordinates. * @param {!Blockly.utils.Coordinate} a A Coordinate. * @param {!Blockly.utils.Coordinate} b A Coordinate. * @return {number} The distance between `a` and `b`. */ function distance(a: Blockly.utils.Coordinate, b: Blockly.utils.Coordinate): number; /** * Returns the magnitude of a coordinate. * @param {!Blockly.utils.Coordinate} a A Coordinate. * @return {number} The distance between the origin and `a`. */ function magnitude(a: Blockly.utils.Coordinate): number; /** * Returns the difference between two coordinates as a new * Blockly.utils.Coordinate. 
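     * Illustrative sketch:
     * @example
     * var a = new Blockly.utils.Coordinate(10, 20);
     * var b = new Blockly.utils.Coordinate(3, 4);
     * var diff = Blockly.utils.Coordinate.difference(a, b);  // (7, 16), assuming a minus b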
* @param {!Blockly.utils.Coordinate|!SVGPoint} a An x/y coordinate. * @param {!Blockly.utils.Coordinate|!SVGPoint} b An x/y coordinate. * @return {!Blockly.utils.Coordinate} A Coordinate representing the difference * between `a` and `b`. */ function difference(a: Blockly.utils.Coordinate|SVGPoint, b: Blockly.utils.Coordinate|SVGPoint): Blockly.utils.Coordinate; /** * Returns the sum of two coordinates as a new Blockly.utils.Coordinate. * @param {!Blockly.utils.Coordinate|!SVGPoint} a An x/y coordinate. * @param {!Blockly.utils.Coordinate|!SVGPoint} b An x/y coordinate. * @return {!Blockly.utils.Coordinate} A Coordinate representing the sum of * the two coordinates. */ function sum(a: Blockly.utils.Coordinate|SVGPoint, b: Blockly.utils.Coordinate|SVGPoint): Blockly.utils.Coordinate; } declare module Blockly.utils.dom { /** * Required name space for SVG elements. * @const */ var SVG_NS: any /*missing*/; /** * Required name space for HTML elements. * @const */ var HTML_NS: any /*missing*/; /** * Required name space for XLINK elements. * @const */ var XLINK_NS: any /*missing*/; /** * Node type constants. * https://developer.mozilla.org/en-US/docs/Web/API/Node/nodeType * @enum {number} */ enum Node { ELEMENT_NODE, TEXT_NODE, COMMENT_NODE, DOCUMENT_POSITION_CONTAINED_BY } /** * Helper method for creating SVG elements. * @param {string} name Element's tag name. * @param {!Object} attrs Dictionary of attribute names and values. * @param {Element} parent Optional parent on which to append the element. * @return {!SVGElement} Newly created SVG element. */ function createSvgElement(name: string, attrs: Object, parent: Element): SVGElement; /** * Add a CSS class to a element. * Similar to Closure's goog.dom.classes.add, except it handles SVG elements. * @param {!Element} element DOM element to add class to. * @param {string} className Name of class to add. * @return {boolean} True if class was added, false if already present. */ function addClass(element: Element, className: string): boolean; /** * Remove a CSS class from a element. * Similar to Closure's goog.dom.classes.remove, except it handles SVG elements. * @param {!Element} element DOM element to remove class from. * @param {string} className Name of class to remove. * @return {boolean} True if class was removed, false if never present. */ function removeClass(element: Element, className: string): boolean; /** * Checks if an element has the specified CSS class. * Similar to Closure's goog.dom.classes.has, except it handles SVG elements. * @param {!Element} element DOM element to check. * @param {string} className Name of class to check. * @return {boolean} True if class exists, false otherwise. */ function hasClass(element: Element, className: string): boolean; /** * Removes a node from its parent. No-op if not attached to a parent. * @param {Node} node The node to remove. * @return {Node} The node removed if removed; else, null. */ function removeNode(node: Node): Node; /** * Insert a node after a reference node. * Contrast with node.insertBefore function. * @param {!Element} newNode New element to insert. * @param {!Element} refNode Existing element to precede new node. */ function insertAfter(newNode: Element, refNode: Element): void; /** * Whether a node contains another node. * @param {!Node} parent The node that should contain the other node. * @param {!Node} descendant The node to test presence of. * @return {boolean} Whether the parent node contains the descendant node. 
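     * Illustrative sketch (assumes `parentEl` and `childEl` are DOM nodes
     * obtained elsewhere, e.g. via document.getElementById):
     * @example
     * var isInside = Blockly.utils.dom.containsNode(parentEl, childEl);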
*/ function containsNode(parent: Node, descendant: Node): boolean; /** * Sets the CSS transform property on an element. This function sets the * non-vendor-prefixed and vendor-prefixed versions for backwards compatibility * with older browsers. See https://caniuse.com/#feat=transforms2d * @param {!Element} element Element to which the CSS transform will be applied. * @param {string} transform The value of the CSS `transform` property. */ function setCssTransform(element: Element, transform: string): void; /** * Start caching text widths. Every call to this function MUST also call * stopTextWidthCache. Caches must not survive between execution threads. */ function startTextWidthCache(): void; /** * Stop caching field widths. Unless caching was already on when the * corresponding call to startTextWidthCache was made. */ function stopTextWidthCache(): void; /** * Gets the width of a text element, caching it in the process. * @param {!Element} textElement An SVG 'text' element. * @return {number} Width of element. */ function getTextWidth(textElement: Element): number; } declare module Blockly.utils { /** * Reference to the global object. * * More info on this implementation here: * https://docs.google.com/document/d/1NAeW4Wk7I7FV0Y2tcUFvQdGMc89k2vdgSXInw8_nvCI/edit */ var global: any /*missing*/; } declare module Blockly.utils.IdGenerator { /** * Gets the next unique ID. * IDs are compatible with the HTML4 id attribute restrictions: * Use only ASCII letters, digits, '_', '-' and '.' * @return {string} The next unique identifier. */ function getNextUniqueId(): string; } declare module Blockly.utils { /** * Key codes for common characters. * * Copied from Closure's goog.events.KeyCodes * * This list is not localized and therefore some of the key codes are not * correct for non US keyboard layouts. See comments below. * * @enum {number} */ enum KeyCodes { WIN_KEY_FF_LINUX, MAC_ENTER, BACKSPACE, TAB, NUM_CENTER, ENTER, SHIFT, CTRL, ALT, PAUSE, CAPS_LOCK, ESC, SPACE, PAGE_UP, PAGE_DOWN, END, HOME, LEFT, UP, RIGHT, DOWN, PLUS_SIGN, PRINT_SCREEN, INSERT, DELETE, ZERO, ONE, TWO, THREE, FOUR, FIVE, SIX, SEVEN, EIGHT, NINE, FF_SEMICOLON, FF_EQUALS, FF_DASH, FF_HASH, QUESTION_MARK, AT_SIGN, A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z, META, WIN_KEY_RIGHT, CONTEXT_MENU, NUM_ZERO, NUM_ONE, NUM_TWO, NUM_THREE, NUM_FOUR, NUM_FIVE, NUM_SIX, NUM_SEVEN, NUM_EIGHT, NUM_NINE, NUM_MULTIPLY, NUM_PLUS, NUM_MINUS, NUM_PERIOD, NUM_DIVISION, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, NUMLOCK, SCROLL_LOCK, FIRST_MEDIA_KEY, LAST_MEDIA_KEY, SEMICOLON, DASH, EQUALS, COMMA, PERIOD, SLASH, APOSTROPHE, TILDE, SINGLE_QUOTE, OPEN_SQUARE_BRACKET, BACKSLASH, CLOSE_SQUARE_BRACKET, WIN_KEY, MAC_FF_META, MAC_WK_CMD_LEFT, MAC_WK_CMD_RIGHT, WIN_IME, VK_NONAME, PHANTOM } } declare module Blockly.utils.math { /** * Converts degrees to radians. * Copied from Closure's goog.math.toRadians. * @param {number} angleDegrees Angle in degrees. * @return {number} Angle in radians. */ function toRadians(angleDegrees: number): number; /** * Converts radians to degrees. * Copied from Closure's goog.math.toDegrees. * @param {number} angleRadians Angle in radians. * @return {number} Angle in degrees. */ function toDegrees(angleRadians: number): number; /** * Clamp the provided number between the lower bound and the upper bound. * @param {number} lowerBound The desired lower bound. * @param {number} number The number to clamp. * @param {number} upperBound The desired upper bound. 
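     * Illustrative sketch:
     * @example
     * Blockly.utils.math.clamp(0, 150, 100);  // 100
     * Blockly.utils.math.clamp(0, -5, 100);   // 0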
* @return {number} The clamped number. */ function clamp(lowerBound: number, number: number, upperBound: number): number; } declare module Blockly.utils.object { /** * Inherit the prototype methods from one constructor into another. * * @param {!Function} childCtor Child class. * @param {!Function} parentCtor Parent class. * @suppress {strictMissingProperties} superClass_ is not defined on Function. */ function inherits(childCtor: Function, parentCtor: Function): void; /** * Copies all the members of a source object to a target object. * @param {!Object} target Target. * @param {!Object} source Source. */ function mixin(target: Object, source: Object): void; /** * Returns an array of a given object's own enumerable property values. * @param {!Object} obj Object containing values. * @return {!Array} Array of values. */ function values(obj: Object): any[]; } declare module Blockly.utils { class Rect extends Rect__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Rect__Class { /** * Class for representing rectangular regions. * @param {number} top Top. * @param {number} bottom Bottom. * @param {number} left Left. * @param {number} right Right. * @struct * @constructor */ constructor(top: number, bottom: number, left: number, right: number); /** @type {number} */ top: number; /** @type {number} */ bottom: number; /** @type {number} */ left: number; /** @type {number} */ right: number; /** * Tests whether this rectangle contains a x/y coordinate. * * @param {number} x The x coordinate to test for containment. * @param {number} y The y coordinate to test for containment. * @return {boolean} Whether this rectangle contains given coordinate. */ contains(x: number, y: number): boolean; } } declare module Blockly.utils { class Size extends Size__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Size__Class { /** * Class for representing sizes consisting of a width and height. * @param {number} width Width. * @param {number} height Height. * @struct * @constructor */ constructor(width: number, height: number); /** * Width * @type {number} */ width: number; /** * Height * @type {number} */ height: number; } } declare module Blockly.utils.Size { /** * Compares sizes for equality. * @param {Blockly.utils.Size} a A Size. * @param {Blockly.utils.Size} b A Size. * @return {boolean} True iff the sizes have equal widths and equal * heights, or if both are null. */ function equals(a: Blockly.utils.Size, b: Blockly.utils.Size): boolean; } declare module Blockly.utils._string { /** * Fast prefix-checker. * Copied from Closure's goog.string.startsWith. * @param {string} str The string to check. * @param {string} prefix A string to look for at the start of `str`. * @return {boolean} True if `str` begins with `prefix`. */ function startsWith(str: string, prefix: string): boolean; /** * Given an array of strings, return the length of the shortest one. * @param {!Array.<string>} array Array of strings. * @return {number} Length of shortest string. */ function shortestStringLength(array: string[]): number; /** * Given an array of strings, return the length of the common prefix. * Words may not be split. Any space after a word is included in the length. * @param {!Array.<string>} array Array of strings. * @param {number=} opt_shortest Length of shortest string. * @return {number} Length of common prefix. 
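     * Illustrative sketch (expected value reasoned from the description above,
     * not taken from Blockly's tests):
     * @example
     * // Shared word prefix is 'turn ' -- 5 characters including the space.
     * Blockly.utils._string.commonWordPrefix(['turn left', 'turn right']);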
*/ function commonWordPrefix(array: string[], opt_shortest?: number): number; /** * Given an array of strings, return the length of the common suffix. * Words may not be split. Any space after a word is included in the length. * @param {!Array.<string>} array Array of strings. * @param {number=} opt_shortest Length of shortest string. * @return {number} Length of common suffix. */ function commonWordSuffix(array: string[], opt_shortest?: number): number; /** * Wrap text to the specified width. * @param {string} text Text to wrap. * @param {number} limit Width to wrap each line. * @return {string} Wrapped text. */ function wrap(text: string, limit: number): string; } declare module Blockly.utils.style { /** * Gets the height and width of an element. * Similar to Closure's goog.style.getSize * @param {!Element} element Element to get size of. * @return {!Blockly.utils.Size} Object with width/height properties. */ function getSize(element: Element): Blockly.utils.Size; /** * Retrieves a computed style value of a node. It returns empty string if the * value cannot be computed (which will be the case in Internet Explorer) or * "none" if the property requested is an SVG one and it has not been * explicitly set (firefox and webkit). * * Copied from Closure's goog.style.getComputedStyle * * @param {!Element} element Element to get style of. * @param {string} property Property to get (camel-case). * @return {string} Style value. */ function getComputedStyle(element: Element, property: string): string; /** * Gets the cascaded style value of a node, or null if the value cannot be * computed (only Internet Explorer can do this). * * Copied from Closure's goog.style.getCascadedStyle * * @param {!Element} element Element to get style of. * @param {string} style Property to get (camel-case). * @return {string} Style value. */ function getCascadedStyle(element: Element, style: string): string; /** * Returns a Coordinate object relative to the top-left of the HTML document. * Similar to Closure's goog.style.getPageOffset * @param {!Element} el Element to get the page offset for. * @return {!Blockly.utils.Coordinate} The page offset. */ function getPageOffset(el: Element): Blockly.utils.Coordinate; /** * Calculates the viewport coordinates relative to the document. * Similar to Closure's goog.style.getViewportPageOffset * @return {!Blockly.utils.Coordinate} The page offset of the viewport. */ function getViewportPageOffset(): Blockly.utils.Coordinate; /** * Shows or hides an element from the page. Hiding the element is done by * setting the display property to "none", removing the element from the * rendering hierarchy so it takes up no space. To show the element, the default * inherited display property is restored (defined either in stylesheets or by * the browser's default style rules). * Copied from Closure's goog.style.getViewportPageOffset * * @param {!Element} el Element to show or hide. * @param {*} isShown True to render the element in its default style, * false to disable rendering the element. */ function setElementShown(el: Element, isShown: any): void; /** * Returns true if the element is using right to left (RTL) direction. * Copied from Closure's goog.style.isRightToLeft * * @param {!Element} el The element to test. * @return {boolean} True for right to left, false for left to right. 
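     * Illustrative sketch:
     * @example
     * var rtl = Blockly.utils.style.isRightToLeft(document.body);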
*/ function isRightToLeft(el: Element): boolean; /** * Gets the computed border widths (on all sides) in pixels * Copied from Closure's goog.style.getBorderBox * @param {!Element} element The element to get the border widths for. * @return {!Object} The computed border widths. */ function getBorderBox(element: Element): Object; /** * Changes the scroll position of `container` with the minimum amount so * that the content and the borders of the given `element` become visible. * If the element is bigger than the container, its top left corner will be * aligned as close to the container's top left corner as possible. * Copied from Closure's goog.style.scrollIntoContainerView * * @param {!Element} element The element to make visible. * @param {!Element} container The container to scroll. If not set, then the * document scroll element will be used. * @param {boolean=} opt_center Whether to center the element in the container. * Defaults to false. */ function scrollIntoContainerView(element: Element, container: Element, opt_center?: boolean): void; /** * Calculate the scroll position of `container` with the minimum amount so * that the content and the borders of the given `element` become visible. * If the element is bigger than the container, its top left corner will be * aligned as close to the container's top left corner as possible. * Copied from Closure's goog.style.getContainerOffsetToScrollInto * * @param {!Element} element The element to make visible. * @param {!Element} container The container to scroll. If not set, then the * document scroll element will be used. * @param {boolean=} opt_center Whether to center the element in the container. * Defaults to false. * @return {!Blockly.utils.Coordinate} The new scroll position of the container, * in form of goog.math.Coordinate(scrollLeft, scrollTop). */ function getContainerOffsetToScrollInto(element: Element, container: Element, opt_center?: boolean): Blockly.utils.Coordinate; } declare module Blockly.utils.svgPaths { /** * Create a string representing the given x, y pair. It does not matter whether * the coordinate is relative or absolute. The result has leading * and trailing spaces, and separates the x and y coordinates with a comma but * no space. * @param {number} x The x coordinate. * @param {number} y The y coordinate. * @return {string} A string of the format ' x,y ' * @public */ function point(x: number, y: number): string; /** * Draw a curbic or quadratic curve. See * developer.mozilla.org/en-US/docs/Web/SVG/Attribute/d#Cubic_B%C3%A9zier_Curve * These coordinates are unitless and hence in the user coordinate system. * @param {string} command The command to use. * Should be one of: c, C, s, S, q, Q. * @param {!Array.<string>} points An array containing all of the points to pass to the * curve command, in order. The points are represented as strings of the * format ' x, y '. * @return {string} A string defining one or more Bezier curves. See the MDN * documentation for exact format. * @public */ function curve(command: string, points: string[]): string; /** * Move the cursor to the given position without drawing a line. * The coordinates are absolute. * These coordinates are unitless and hence in the user coordinate system. * See developer.mozilla.org/en-US/docs/Web/SVG/Tutorial/Paths#Line_commands * @param {number} x The absolute x coordinate. * @param {number} y The absolute y coordinate. 
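     * Illustrative sketch (the result follows the ' M x,y ' format noted below):
     * @example
     * var d = Blockly.utils.svgPaths.moveTo(10, 20);  // e.g. ' M 10,20 '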
* @return {string} A string of the format ' M x,y ' * @public */ function moveTo(x: number, y: number): string; /** * Move the cursor to the given position without drawing a line. * Coordinates are relative. * These coordinates are unitless and hence in the user coordinate system. * See developer.mozilla.org/en-US/docs/Web/SVG/Tutorial/Paths#Line_commands * @param {number} dx The relative x coordinate. * @param {number} dy The relative y coordinate. * @return {string} A string of the format ' m dx,dy ' * @public */ function moveBy(dx: number, dy: number): string; /** * Draw a line from the current point to the end point, which is the current * point shifted by dx along the x-axis and dy along the y-axis. * These coordinates are unitless and hence in the user coordinate system. * See developer.mozilla.org/en-US/docs/Web/SVG/Tutorial/Paths#Line_commands * @param {number} dx The relative x coordinate. * @param {number} dy The relative y coordinate. * @return {string} A string of the format ' l dx,dy ' * @public */ function lineTo(dx: number, dy: number): string; /** * Draw multiple lines connecting all of the given points in order. This is * equivalent to a series of 'l' commands. * These coordinates are unitless and hence in the user coordinate system. * See developer.mozilla.org/en-US/docs/Web/SVG/Tutorial/Paths#Line_commands * @param {!Array.<string>} points An array containing all of the points to * draw lines to, in order. The points are represented as strings of the * format ' dx,dy '. * @return {string} A string of the format ' l (dx,dy)+ ' * @public */ function line(points: string[]): string; /** * Draw a horizontal or vertical line. * The first argument specifies the direction and whether the given position is * relative or absolute. * These coordinates are unitless and hence in the user coordinate system. * See developer.mozilla.org/en-US/docs/Web/SVG/Attribute/d#LineTo_path_commands * @param {string} command The command to prepend to the coordinate. This * should be one of: V, v, H, h. * @param {number} val The coordinate to pass to the command. It may be * absolute or relative. * @return {string} A string of the format ' command val ' * @public */ function lineOnAxis(command: string, val: number): string; /** * Draw an elliptical arc curve. * These coordinates are unitless and hence in the user coordinate system. * See developer.mozilla.org/en-US/docs/Web/SVG/Attribute/d#Elliptical_Arc_Curve * @param {string} command The command string. Either 'a' or 'A'. * @param {string} flags The flag string. See the MDN documentation for a * description and examples. * @param {number} radius The radius of the arc to draw. * @param {string} point The point to move the cursor to after drawing the arc, * specified either in absolute or relative coordinates depending on the * command. * @return {string} A string of the format 'command radius radius flags point' * @public */ function arc(command: string, flags: string, radius: number, point: string): string; } declare module Blockly.utils.xml { /** * Namespace for Blockly's XML. */ var NAME_SPACE: any /*missing*/; /** * Get the document object. This method is overridden in the Node.js build of * Blockly. See gulpfile.js, package-blockly-node task. * @return {!Document} The document object. * @public */ function document(): Document; /** * Create DOM element for XML. * @param {string} tagName Name of DOM element. * @return {!Element} New DOM element. * @public */ function createElement(tagName: string): Element; /** * Create text element for XML. 
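     * Illustrative sketch pairing this helper with createElement (declared above);
     * the tag name and text are hypothetical:
     * @example
     * var fieldEl = Blockly.utils.xml.createElement('field');
     * fieldEl.appendChild(Blockly.utils.xml.createTextNode('item'));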
* @param {string} text Text content. * @return {!Node} New DOM node. * @public */ function createTextNode(text: string): Node; /** * Converts an XML string into a DOM tree. * @param {string} text XML string. * @return {Document} The DOM document. * @throws if XML doesn't parse. * @public */ function textToDomDocument(text: string): Document; /** * Converts a DOM structure into plain text. * Currently the text format is fairly ugly: all one line with no whitespace. * @param {!Element} dom A tree of XML elements. * @return {string} Text representation. * @public */ function domToText(dom: Element): string; } declare module Blockly { class Menu extends Menu__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Menu__Class extends Blockly.Component__Class { /** * A basic menu class. * @constructor * @extends {Blockly.Component} */ constructor(); /** * Focus the menu element. * @package */ focus(): void; /** * Blur the menu element. * @package */ blur(): void; /** * Set the menu accessibility role. * @param {!Blockly.utils.aria.Role|string} roleName role name. * @package */ setRole(roleName: Blockly.utils.aria.Role|string): void; /** * Returns the child menuitem that owns the given DOM node, or null if no such * menuitem is found. * @param {Node} node DOM node whose owner is to be returned. * @return {?Blockly.MenuItem} menuitem for which the DOM node belongs to. * @protected */ getMenuItem(node: Node): Blockly.MenuItem; /** * Unhighlight the current highlighted item. * @protected */ unhighlightCurrent(): void; /** * Clears the currently highlighted item. * @protected */ clearHighlighted(): void; /** * Returns the currently highlighted item (if any). * @return {?Blockly.Component} Highlighted item (null if none). * @protected */ getHighlighted(): Blockly.Component; /** * Highlights the item at the given 0-based index (if any). If another item * was previously highlighted, it is un-highlighted. * @param {number} index Index of item to highlight (-1 removes the current * highlight). * @protected */ setHighlightedIndex(index: number): void; /** * Highlights the given item if it exists and is a child of the container; * otherwise un-highlights the currently highlighted item. * @param {Blockly.MenuItem} item Item to highlight. * @protected */ setHighlighted(item: Blockly.MenuItem): void; /** * Highlights the next highlightable item (or the first if nothing is currently * highlighted). * @package */ highlightNext(): void; /** * Highlights the previous highlightable item (or the last if nothing is * currently highlighted). * @package */ highlightPrevious(): void; /** * Helper function that manages the details of moving the highlight among * child menuitems in response to keyboard events. * @param {function(this: Blockly.Component, number, number) : number} fn * Function that accepts the current and maximum indices, and returns the * next index to check. * @param {number} startIndex Start index. * @return {boolean} Whether the highlight has changed. * @protected */ highlightHelper(fn: { (_0: number, _1: number): number }, startIndex: number): boolean; /** * Returns whether the given item can be highlighted. * @param {Blockly.MenuItem} item The item to check. * @return {boolean} Whether the item can be highlighted. * @protected */ canHighlightItem(item: Blockly.MenuItem): boolean; /** * Attempts to handle a keyboard event, if the menuitem is enabled, by calling * {@link handleKeyEventInternal}. 
Considered protected; should only be used * within this package and by subclasses. * @param {Event} e Key event to handle. * @return {boolean} Whether the key event was handled. * @protected */ handleKeyEvent(e: Event): boolean; /** * Attempts to handle a keyboard event; returns true if the event was handled, * false otherwise. If the container is enabled, and a child is highlighted, * calls the child menuitem's `handleKeyEvent` method to give the menuitem * a chance to handle the event first. * @param {Event} e Key event to handle. * @return {boolean} Whether the event was handled by the container (or one of * its children). * @protected */ handleKeyEventInternal(e: Event): boolean; } } declare module Blockly { class MenuItem extends MenuItem__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class MenuItem__Class extends Blockly.Component__Class { /** * Class representing an item in a menu. * * @param {string} content Text caption to display as the content of * the item. * @param {string=} opt_value Data/model associated with the menu item. * @constructor * @extends {Blockly.Component} */ constructor(content: string, opt_value?: string); /** * @return {Element} The HTML element for the checkbox. * @protected */ getCheckboxDom(): Element; /** * @return {!Element} The HTML for the content. * @protected */ getContentDom(): Element; /** * @return {!Element} The HTML for the content wrapper. * @protected */ getContentWrapperDom(): Element; /** * Sets the content associated with the menu item. * @param {string} content Text caption to set as the * menuitem's contents. * @protected */ setContentInternal(content: string): void; /** * Sets the value associated with the menu item. * @param {*} value Value to be associated with the menu item. * @package */ setValue(value: any): void; /** * Gets the value associated with the menu item. * @return {*} value Value associated with the menu item. * @package */ getValue(): any; /** * Set the menu accessibility role. * @param {!Blockly.utils.aria.Role|string} roleName role name. * @package */ setRole(roleName: Blockly.utils.aria.Role|string): void; /** * Sets the menu item to be checkable or not. Set to true for menu items * that represent checkable options. * @param {boolean} checkable Whether the menu item is checkable. * @package */ setCheckable(checkable: boolean): void; /** * Checks or unchecks the component. * @param {boolean} checked Whether to check or uncheck the component. * @package */ setChecked(checked: boolean): void; /** * Highlights or unhighlights the component. * @param {boolean} highlight Whether to highlight or unhighlight the component. * @package */ setHighlighted(highlight: boolean): void; /** * Returns true if the menu item is enabled, false otherwise. * @return {boolean} Whether the menu item is enabled. * @package */ isEnabled(): boolean; /** * Enables or disables the menu item. * @param {boolean} enabled Whether to enable or disable the menu item. * @package */ setEnabled(enabled: boolean): void; /** * Handles click events. If the component is enabled, trigger * the action associated with this menu item. * @param {Event} _e Mouse event to handle. * @package */ handleClick(_e: Event): void; /** * Performs the appropriate action when the menu item is activated * by the user. * @protected */ performActionInternal(): void; /** * Set the handler that's triggered when the menu item is activated * by the user. 
If `opt_obj` is provided, it will be used as the * 'this' object in the function when called. * @param {function(this:T,!Blockly.MenuItem):?} fn The handler. * @param {T=} opt_obj Used as the 'this' object in f when called. * @template T * @package */ onAction<T>(fn: { (_0: Blockly.MenuItem): any }, opt_obj?: T): void; } } declare module Blockly.tree { class BaseNode extends BaseNode__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class BaseNode__Class extends Blockly.Component__Class { /** * An abstract base class for a node in the tree. * Similar to goog.ui.tree.BaseNode * * @param {string} content The content of the node label treated as * plain-text and will be HTML escaped. * @param {!Blockly.tree.BaseNode.Config} config The configuration for the tree. * @constructor * @extends {Blockly.Component} */ constructor(content: string, config: Blockly.tree.BaseNode.Config); /** * @type {Blockly.tree.TreeControl} * @protected */ tree: Blockly.tree.TreeControl; /** * Adds roles and states. * @protected */ initAccessibility(): void; /** * Appends a node as a child to the current node. * @param {Blockly.tree.BaseNode} child The child to add. * @package */ add(child: Blockly.tree.BaseNode): void; /** * Returns the tree. * @return {?Blockly.tree.TreeControl} tree * @protected */ getTree(): Blockly.tree.TreeControl; /** * Returns the depth of the node in the tree. Should not be overridden. * @return {number} The non-negative depth of this node (the root is zero). * @protected */ getDepth(): number; /** * Returns true if the node is a descendant of this node * @param {Blockly.tree.BaseNode} node The node to check. * @return {boolean} True if the node is a descendant of this node, false * otherwise. * @protected */ contains(node: Blockly.tree.BaseNode): boolean; /** * This is re-defined here to indicate to the closure compiler the correct * child return type. * @param {number} index 0-based index. * @return {Blockly.tree.BaseNode} The child at the given index; null if none. * @protected */ getChildAt(index: number): Blockly.tree.BaseNode; /** * Returns the children of this node. * @return {!Array.<!Blockly.tree.BaseNode>} The children. * @package */ getChildren(): Blockly.tree.BaseNode[]; /** * @return {Blockly.tree.BaseNode} The first child of this node. * @protected */ getFirstChild(): Blockly.tree.BaseNode; /** * @return {Blockly.tree.BaseNode} The last child of this node. * @protected */ getLastChild(): Blockly.tree.BaseNode; /** * @return {Blockly.tree.BaseNode} The previous sibling of this node. * @protected */ getPreviousSibling(): Blockly.tree.BaseNode; /** * @return {Blockly.tree.BaseNode} The next sibling of this node. * @protected */ getNextSibling(): Blockly.tree.BaseNode; /** * @return {boolean} Whether the node is the last sibling. * @protected */ isLastSibling(): boolean; /** * @return {boolean} Whether the node is selected. * @protected */ isSelected(): boolean; /** * Selects the node. * @protected */ select(): void; /** * Selects the first node. * @protected */ selectFirst(): void; /** * Called from the tree to instruct the node change its selection state. * @param {boolean} selected The new selection state. * @protected */ setSelectedInternal(selected: boolean): void; /** * @return {boolean} Whether the node is expanded. * @protected */ getExpanded(): boolean; /** * Sets the node to be expanded internally, without state change events. * @param {boolean} expanded Whether to expand or close the node. 
* @protected */ setExpandedInternal(expanded: boolean): void; /** * Sets the node to be expanded. * @param {boolean} expanded Whether to expand or close the node. * @package */ setExpanded(expanded: boolean): void; /** * Used to notify a node that we have expanded it. * Can be overridden by subclasses, see Blockly.tree.TreeNode. * @protected */ doNodeExpanded(): void; /** * Used to notify a node that we have collapsed it. * Can be overridden by subclasses, see Blockly.tree.TreeNode. * @protected */ doNodeCollapsed(): void; /** * Toggles the expanded state of the node. * @protected */ toggle(): void; /** * @return {boolean} Whether the node is collapsible by user actions. * @protected */ isUserCollapsible(): boolean; /** * Creates HTML Element for the node. * @return {!Element} HTML element * @protected */ toDom(): Element; /** * @return {!Element} The HTML element for the row. * @protected */ getRowDom(): Element; /** * @return {string} The class name for the row. * @protected */ getRowClassName(): string; /** * @return {!Element} The HTML element for the label. * @protected */ getLabelDom(): Element; /** * @return {!Element} The HTML for the icon. * @protected */ getIconDom(): Element; /** * Gets the calculated icon class. * @protected */ getCalculatedIconClass(): void; /** * @return {string} The background position style value. * @protected */ getBackgroundPosition(): string; /** * @return {Element} The row is the div that is used to draw the node without * the children. * @package */ getRowElement(): Element; /** * @return {Element} The icon element. * @protected */ getIconElement(): Element; /** * @return {Element} The label element. * @protected */ getLabelElement(): Element; /** * @return {Element} The div containing the children. * @protected */ getChildrenElement(): Element; /** * Gets the icon class for the node. * @return {string} s The icon source. * @protected */ getIconClass(): string; /** * Gets the icon class for when the node is expanded. * @return {string} The class. * @protected */ getExpandedIconClass(): string; /** * Sets the text of the label. * @param {string} s The plain text of the label. * @protected */ setText(s: string): void; /** * Returns the text of the label. If the text was originally set as HTML, the * return value is unspecified. * @return {string} The plain text of the label. * @package */ getText(): string; /** * Updates the row styles. * @protected */ updateRow(): void; /** * Updates the expand icon of the node. * @protected */ updateExpandIcon(): void; /** * Handles mouse down event. * @param {!Event} e The browser event. * @protected */ onMouseDown(e: Event): void; /** * Handles a click event. * @param {!Event} e The browser event. * @protected */ onClick_(e: Event): void; /** * Handles a key down event. * @param {!Event} e The browser event. * @return {boolean} The handled value. * @protected */ onKeyDown(e: Event): boolean; /** * Select the next node. * @return {boolean} True if the action has been handled, false otherwise. * @package */ selectNext(): boolean; /** * Select the previous node. * @return {boolean} True if the action has been handled, false otherwise. * @package */ selectPrevious(): boolean; /** * Select the parent node or collapse the current node. * @return {boolean} True if the action has been handled, false otherwise. * @package */ selectParent(): boolean; /** * Expand the current node if it's not already expanded, or select the * child node. * @return {boolean} True if the action has been handled, false otherwise.
* @package */ selectChild(): boolean; /** * @return {Blockly.tree.BaseNode} The last shown descendant. * @protected */ getLastShownDescendant(): Blockly.tree.BaseNode; /** * @return {Blockly.tree.BaseNode} The next node to show or null if there isn't * a next node to show. * @protected */ getNextShownNode(): Blockly.tree.BaseNode; /** * @return {Blockly.tree.BaseNode} The previous node to show. * @protected */ getPreviousShownNode(): Blockly.tree.BaseNode; /** * @return {!Blockly.tree.BaseNode.Config} The configuration for the tree. * @protected */ getConfig(): Blockly.tree.BaseNode.Config; /** * Internal method that is used to set the tree control on the node. * @param {Blockly.tree.TreeControl} tree The tree control. * @protected */ setTreeInternal(tree: Blockly.tree.TreeControl): void; } } declare module Blockly.tree.BaseNode { /** * The config type for the tree. * @typedef {{ * indentWidth:number, * cssRoot:string, * cssHideRoot:string, * cssTreeRow:string, * cssItemLabel:string, * cssTreeIcon:string, * cssExpandedFolderIcon:string, * cssCollapsedFolderIcon:string, * cssFileIcon:string, * cssSelectedRow:string * }} */ interface Config { indentWidth: number; cssRoot: string; cssHideRoot: string; cssTreeRow: string; cssItemLabel: string; cssTreeIcon: string; cssExpandedFolderIcon: string; cssCollapsedFolderIcon: string; cssFileIcon: string; cssSelectedRow: string } /** * Map of nodes in existence. Needed to route events to the appropriate nodes. * Nodes are added to the map at {@link #enterDocument} time and removed at * {@link #exitDocument} time. * @type {Object} * @protected */ var allNodes: Object; } declare module Blockly.tree { class TreeControl extends TreeControl__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class TreeControl__Class extends Blockly.tree.BaseNode__Class { /** * An extension of the TreeControl object in closure that provides * a way to view a hierarchical set of data. * Similar to Closure's goog.ui.tree.TreeControl * * @param {Blockly.Toolbox} toolbox The parent toolbox for this tree. * @param {!Blockly.tree.BaseNode.Config} config The configuration for the tree. * @constructor * @extends {Blockly.tree.BaseNode} */ constructor(toolbox: Blockly.Toolbox, config: Blockly.tree.BaseNode.Config); /** * Returns the associated toolbox. * @return {Blockly.Toolbox} The toolbox. * @package */ getToolbox(): Blockly.Toolbox; /** * Get whether this tree has focus or not. * @return {boolean} True if it has focus. * @package */ hasFocus(): boolean; /** * Sets the selected item. * @param {Blockly.tree.BaseNode} node The item to select. * @package */ setSelectedItem(node: Blockly.tree.BaseNode): void; /** * Set the handler that's triggered before a node is selected. * @param {function(Blockly.tree.BaseNode):boolean} fn The handler * @package */ onBeforeSelected(fn: { (_0: Blockly.tree.BaseNode): boolean }): void; /** * Set the handler that's triggered after a node is selected. * @param {function( * Blockly.tree.BaseNode, Blockly.tree.BaseNode):?} fn The handler * @package */ onAfterSelected(fn: { (_0: Blockly.tree.BaseNode, _1: Blockly.tree.BaseNode): any }): void; /** * Returns the selected item. * @return {Blockly.tree.BaseNode} The currently selected item. * @package */ getSelectedItem(): Blockly.tree.BaseNode; /** * Creates a new tree node using the same config as the root. * @param {string=} opt_content The content of the node label. * @return {!Blockly.tree.TreeNode} The new item. 
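 *
 * Illustrative sketch (assumed usage, not part of the generated docs): build
 * a child node with the tree's shared config and attach it under the root.
 *
 *     var category = tree.createNode('Loops');
 *     tree.add(category);
 *     category.setExpanded(true);
 *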
* @package */ createNode(opt_content?: string): Blockly.tree.TreeNode; } } declare module Blockly.tree { class TreeNode extends TreeNode__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class TreeNode__Class extends Blockly.tree.BaseNode__Class { /** * A single node in the tree, customized for Blockly's UI. * Similar to Closure's goog.ui.tree.TreeNode * * @param {Blockly.Toolbox} toolbox The parent toolbox for this tree. * @param {string} content The content of the node label treated as * plain-text and will be HTML escaped. * @param {!Blockly.tree.BaseNode.Config} config The configuration for the tree. * @constructor * @extends {Blockly.tree.BaseNode} */ constructor(toolbox: Blockly.Toolbox, content: string, config: Blockly.tree.BaseNode.Config); /** * Set the handler that's triggered when the size of node has changed. * @param {function():?} fn The handler * @package */ onSizeChanged(fn: { (): any }): void; } } declare module Blockly.blockRendering { /** * Whether or not the debugger is turned on. * @type {boolean} * @package */ var useDebugger: boolean; /** * Registers a new renderer. * @param {string} name The name of the renderer. * @param {!Function} rendererClass The new renderer class * to register. * @throws {Error} if a renderer with the same name has already been registered. */ function register(name: string, rendererClass: Function): void; /** * Unregisters the renderer registered with the given name. * @param {string} name The name of the renderer. */ function unregister(name: string): void; /** * Turn on the blocks debugger. * @package */ function startDebugger(): void; /** * Turn off the blocks debugger. * @package */ function stopDebugger(): void; /** * Initialize anything needed for rendering (constants, etc). * @param {!string} name Name of the renderer to initialize. * @return {!Blockly.blockRendering.Renderer} The new instance of a renderer. * Already initialized. * @package */ function init(name: string): Blockly.blockRendering.Renderer; } declare module Blockly.blockRendering { class ConstantProvider extends ConstantProvider__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class ConstantProvider__Class { /** * An object that provides constants for rendering blocks. * @constructor * @package */ constructor(); /** * Rounded corner radius. * @type {number} */ CORNER_RADIUS: number; /** * The height of an empty statement input. Note that in the old rendering this * varies slightly depending on whether the block has external or inline inputs. * In the new rendering this is consistent. It seems unlikely that the old * behaviour was intentional. * @const * @type {number} */ EMPTY_STATEMENT_INPUT_HEIGHT: number; /** * Height of SVG path for jagged teeth at the end of collapsed blocks. * @const */ JAGGED_TEETH_HEIGHT: any /*missing*/; /** * Width of SVG path for jagged teeth at the end of collapsed blocks. * @const */ JAGGED_TEETH_WIDTH: any /*missing*/; /** * Initialize shape objects based on the constants set in the constructor. * @package */ init(): void; /** * An object containing sizing and path information about collapsed block * indicators. * @type {!Object} */ JAGGED_TEETH: Object; /** * An object containing sizing and path information about notches. * @type {!Object} */ NOTCH: Object; /** * An object containing sizing and path information about start hats * @type {!Object} */ START_HAT: Object; /** * An object containing sizing and path information about puzzle tabs. 
* @type {!Object} */ PUZZLE_TAB: Object; /** * An object containing sizing and path information about inside corners * @type {!Object} */ INSIDE_CORNERS: Object; /** * An object containing sizing and path information about outside corners. * @type {!Object} */ OUTSIDE_CORNERS: Object; /** * @return {!Object} An object containing sizing and path information about * collapsed block indicators. * @package */ makeJaggedTeeth(): Object; /** * @return {!Object} An object containing sizing and path information about * start hats. * @package */ makeStartHat(): Object; /** * @return {!Object} An object containing sizing and path information about * puzzle tabs. * @package */ makePuzzleTab(): Object; /** * @return {!Object} An object containing sizing and path information about * notches. * @package */ makeNotch(): Object; /** * @return {!Object} An object containing sizing and path information about * inside corners. * @package */ makeInsideCorners(): Object; /** * @return {!Object} An object containing sizing and path information about * outside corners. * @package */ makeOutsideCorners(): Object; /** * Get an object with connection shape and sizing information based on the type * of the connection. * @param {!Blockly.RenderedConnection} connection The connection to find a * shape object for * @return {!Object} The shape object for the connection. * @package */ shapeFor(connection: Blockly.RenderedConnection): Object; } } declare module Blockly.blockRendering { class Debug extends Debug__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Debug__Class { /** * An object that renders rectangles and dots for debugging rendering code. * @package * @constructor */ constructor(); /** * An array of SVG elements that have been created by this object. * @type {Array.<!SVGElement>} */ debugElements_: SVGElement[]; /** * The SVG root of the block that is being rendered. Debug elements will * be attached to this root. * @type {SVGElement} */ svgRoot_: SVGElement; /** * Remove all elements that this object created on the last pass. * @package */ clearElems(): void; /** * Draw a debug rectangle for a spacer (empty) row. * @param {!Blockly.blockRendering.Row} row The row to render. * @param {number} cursorY The y position of the top of the row. * @param {boolean} isRtl Whether the block is rendered RTL. * @package */ drawSpacerRow(row: Blockly.blockRendering.Row, cursorY: number, isRtl: boolean): void; /** * Draw a debug rectangle for a horizontal spacer. * @param {!Blockly.blockRendering.InRowSpacer} elem The spacer to render. * @param {number} rowHeight The height of the container row. * @param {boolean} isRtl Whether the block is rendered RTL. * @package */ drawSpacerElem(elem: Blockly.blockRendering.InRowSpacer, rowHeight: number, isRtl: boolean): void; /** * Draw a debug rectangle for an in-row element. * @param {!Blockly.blockRendering.Measurable} elem The element to render. * @param {boolean} isRtl Whether the block is rendered RTL. * @package */ drawRenderedElem(elem: Blockly.blockRendering.Measurable, isRtl: boolean): void; /** * Draw a circle at the location of the given connection. Inputs and outputs * share the same colors, as do previous and next. When positioned correctly * a connected pair will look like a bullseye. * @param {Blockly.RenderedConnection} conn The connection to circle. * @package */ drawConnection(conn: Blockly.RenderedConnection): void; /** * Draw a debug rectangle for a non-empty row.
* @param {!Blockly.blockRendering.Row} row The non-empty row to render. * @param {number} cursorY The y position of the top of the row. * @param {boolean} isRtl Whether the block is rendered RTL. * @package */ drawRenderedRow(row: Blockly.blockRendering.Row, cursorY: number, isRtl: boolean): void; /** * Draw debug rectangles for a non-empty row and all of its subcomponents. * @param {!Blockly.blockRendering.Row} row The non-empty row to render. * @param {number} cursorY The y position of the top of the row. * @param {boolean} isRtl Whether the block is rendered RTL. * @package */ drawRowWithElements(row: Blockly.blockRendering.Row, cursorY: number, isRtl: boolean): void; /** * Draw a debug rectangle around the entire block. * @param {!Blockly.blockRendering.RenderInfo} info Rendering information about * the block to debug. * @package */ drawBoundingBox(info: Blockly.blockRendering.RenderInfo): void; /** * Do all of the work to draw debug information for the whole block. * @param {!Blockly.BlockSvg} block The block to draw debug information for. * @param {!Blockly.blockRendering.RenderInfo} info Rendering information about * the block to debug. * @package */ drawDebug(block: Blockly.BlockSvg, info: Blockly.blockRendering.RenderInfo): void; } } declare module Blockly.blockRendering.Debug { /** * Configuration object containing booleans to enable and disable debug * rendering of specific rendering components. * @type {!Object.<string, boolean>} */ var config: { [key: string]: boolean }; } declare module Blockly.blockRendering { class Drawer extends Drawer__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Drawer__Class { /** * An object that draws a block based on the given rendering information. * @param {!Blockly.BlockSvg} block The block to render. * @param {!Blockly.blockRendering.RenderInfo} info An object containing all * information needed to render this block. * @package * @constructor */ constructor(block: Blockly.BlockSvg, info: Blockly.blockRendering.RenderInfo); /** * The renderer's constant provider. * @type {!Blockly.blockRendering.ConstantProvider} * @protected */ constants_: Blockly.blockRendering.ConstantProvider; /** * Draw the block to the workspace. Here "drawing" means setting SVG path * elements and moving fields, icons, and connections on the screen. * * The pieces of the paths are pushed into arrays of "steps", which are then * joined with spaces and set directly on the block. This guarantees that * the steps are separated by spaces for improved readability, but isn't * required. * @package */ draw(): void; /** * Save sizing information back to the block * Most of the rendering information can be thrown away at the end of the * render. Anything that needs to be kept around should be set in this function. * @protected */ recordSizeOnBlock_(): void; /** * Hide icons that were marked as hidden. * @protected */ hideHiddenIcons_(): void; /** * Create the outline of the block. This is a single continuous path. * @protected */ drawOutline_(): void; /** * Add steps for the top corner of the block, taking into account * details such as hats and rounded corners. * @protected */ drawTop_(): void; /** * Add steps for the jagged edge of a row on a collapsed block. * @param {!Blockly.blockRendering.Row} row The row to draw the side of. * @protected */ drawJaggedEdge_(row: Blockly.blockRendering.Row): void; /** * Add steps for an external value input, rendered as a notch in the side * of the block. 
* @param {!Blockly.blockRendering.Row} row The row that this input * belongs to. * @protected */ drawValueInput_(row: Blockly.blockRendering.Row): void; /** * Add steps for a statement input. * @param {!Blockly.blockRendering.Row} row The row that this input * belongs to. * @protected */ drawStatementInput_(row: Blockly.blockRendering.Row): void; /** * Add steps for the right side of a row that does not have value or * statement input connections. * @param {!Blockly.blockRendering.Row} row The row to draw the * side of. * @protected */ drawRightSideRow_(row: Blockly.blockRendering.Row): void; /** * Add steps for the bottom edge of a block, possibly including a notch * for the next connection * @protected */ drawBottom_(): void; /** * Add steps for the left side of the block, which may include an output * connection * @protected */ drawLeft_(): void; /** * Draw the internals of the block: inline inputs, fields, and icons. These do * not depend on the outer path for placement. * @protected */ drawInternals_(): void; /** * Push a field or icon's new position to its SVG root. * @param {!Blockly.blockRendering.Icon|!Blockly.blockRendering.Field} fieldInfo * The rendering information for the field or icon. * @protected */ layoutField_(fieldInfo: Blockly.blockRendering.Icon|Blockly.blockRendering.Field): void; /** * Add steps for an inline input. * @param {!Blockly.blockRendering.InlineInput} input The information about the * input to render. * @protected */ drawInlineInput_(input: Blockly.blockRendering.InlineInput): void; /** * Position the connection on an inline value input, taking into account * RTL and the small gap between the parent block and child block which lets the * parent block's dark path show through. * @param {Blockly.blockRendering.InlineInput} input The information about * the input that the connection is on. * @protected */ positionInlineInputConnection_(input: Blockly.blockRendering.InlineInput): void; /** * Position the connection on a statement input, taking into account * RTL and the small gap between the parent block and child block which lets the * parent block's dark path show through. * @param {!Blockly.blockRendering.Row} row The row that the connection is on. * @protected */ positionStatementInputConnection_(row: Blockly.blockRendering.Row): void; /** * Position the connection on an external value input, taking into account * RTL and the small gap between the parent block and child block which lets the * parent block's dark path show through. * @param {!Blockly.blockRendering.Row} row The row that the connection is on. * @protected */ positionExternalValueConnection_(row: Blockly.blockRendering.Row): void; /** * Position the previous connection on a block. * @protected */ positionPreviousConnection_(): void; /** * Position the next connection on a block. * @protected */ positionNextConnection_(): void; /** * Position the output connection on a block. * @protected */ positionOutputConnection_(): void; } } declare module Blockly.blockRendering { class RenderInfo extends RenderInfo__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class RenderInfo__Class { /** * An object containing all sizing information needed to draw this block. * * This measure pass does not propagate changes to the block (although fields * may choose to rerender when getSize() is called). However, calling it * repeatedly may be expensive. * * @param {!Blockly.blockRendering.Renderer} renderer The renderer in use. 
* @param {!Blockly.BlockSvg} block The block to measure. * @constructor * @package */ constructor(renderer: Blockly.blockRendering.Renderer, block: Blockly.BlockSvg); /** * The block renderer in use. * @type {!Blockly.blockRendering.Renderer} * @protected */ renderer_: Blockly.blockRendering.Renderer; /** * The renderer's constant provider. * @type {!Blockly.blockRendering.ConstantProvider} * @protected */ constants_: Blockly.blockRendering.ConstantProvider; /** * A measurable representing the output connection if the block has one. * Otherwise null. * @type {Blockly.blockRendering.OutputConnection} */ outputConnection: Blockly.blockRendering.OutputConnection; /** * Whether the block should be rendered as a single line, either because it's * inline or because it has been collapsed. * @type {boolean} */ isInline: boolean; /** * Whether the block is collapsed. * @type {boolean} */ isCollapsed: boolean; /** * Whether the block is an insertion marker. Insertion markers are the same * shape as normal blocks, but don't show fields. * @type {boolean} */ isInsertionMarker: boolean; /** * True if the block should be rendered right-to-left. * @type {boolean} */ RTL: boolean; /** * The height of the rendered block, including child blocks. * @type {number} */ height: number; /** * The width of the rendered block, including child blocks. * @type {number} */ widthWithChildren: number; /** * The width of the rendered block, excluding child blocks. This is the right * edge of the block when rendered LTR. * @type {number} */ width: number; /** * * @type {number} */ statementEdge: number; /** * An array of Row objects containing sizing information. * @type {!Array.<!Blockly.blockRendering.Row>} */ rows: Blockly.blockRendering.Row[]; /** * An array of measurable objects containing hidden icons. * @type {!Array.<!Blockly.blockRendering.Icon>} */ hiddenIcons: Blockly.blockRendering.Icon[]; /** * An object with rendering information about the top row of the block. * @type {!Blockly.blockRendering.TopRow} */ topRow: Blockly.blockRendering.TopRow; /** * An object with rendering information about the bottom row of the block. * @type {!Blockly.blockRendering.BottomRow} */ bottomRow: Blockly.blockRendering.BottomRow; /** * Get the block renderer in use. * @return {!Blockly.blockRendering.Renderer} The block renderer in use. * @package */ getRenderer(): Blockly.blockRendering.Renderer; /** * Populate and return an object containing all sizing information needed to * draw this block. * * This measure pass does not propagate changes to the block (although fields * may choose to rerender when getSize() is called). However, calling it * repeatedly may be expensive. * * @package */ measure(): void; /** * Create rows of Measurable objects representing all renderable parts of the * block. * @protected */ createRows_(): void; /** * Create all non-spacer elements that belong on the top row. * @package */ populateTopRow_(): void; /** * Create all non-spacer elements that belong on the bottom row. * @package */ populateBottomRow_(): void; /** * Add an input element to the active row, if needed, and record the type of the * input on the row. * @param {!Blockly.Input} input The input to record information about. * @param {!Blockly.blockRendering.Row} activeRow The row that is currently being * populated. * @protected */ addInput_(input: Blockly.Input, activeRow: Blockly.blockRendering.Row): void; /** * Decide whether to start a new row between the two Blockly.Inputs. 
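 *
 * A hypothetical override in a custom RenderInfo subclass (sketch only; the
 * Blockly.DUMMY_INPUT check is an assumption about the input type constants,
 * not taken from this file):
 *
 *     shouldStartNewRow_(input, lastInput) {
 *       // Give every dummy input its own row, otherwise defer to the base
 *       // behaviour.
 *       if (input.type === Blockly.DUMMY_INPUT) {
 *         return true;
 *       }
 *       return super.shouldStartNewRow_(input, lastInput);
 *     }
 *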
* @param {!Blockly.Input} input The first input to consider * @param {Blockly.Input} lastInput The input that follows. * @return {boolean} True if the next input should be rendered on a new row. * @protected */ shouldStartNewRow_(input: Blockly.Input, lastInput: Blockly.Input): boolean; /** * Add horizontal spacing between and around elements within each row. * @protected */ addElemSpacing_(): void; /** * Calculate the width of a spacer element in a row based on the previous and * next elements in that row. For instance, extra padding is added between two * editable fields. * @param {Blockly.blockRendering.Measurable} prev The element before the * spacer. * @param {Blockly.blockRendering.Measurable} next The element after the spacer. * @return {number} The size of the spacing between the two elements. * @protected */ getInRowSpacing_(prev: Blockly.blockRendering.Measurable, next: Blockly.blockRendering.Measurable): number; /** * Figure out where the right edge of the block and right edge of statement inputs * should be placed. * @protected */ computeBounds_(): void; /** * Extra spacing may be necessary to make sure that the right sides of all * rows line up. This can only be calculated after a first pass to calculate * the sizes of all rows. * @protected */ alignRowElements_(): void; /** * Modify the given row to add the given amount of padding around its fields. * The exact location of the padding is based on the alignment property of the * last input in the field. * @param {Blockly.blockRendering.Row} row The row to add padding to. * @param {number} missingSpace How much padding to add. * @protected */ addAlignmentPadding_(row: Blockly.blockRendering.Row, missingSpace: number): void; /** * Align the elements of a statement row based on computed bounds. * Unlike other types of rows, statement rows add space in multiple places. * @param {!Blockly.blockRendering.InputRow} row The statement row to resize. * @protected */ alignStatementRow_(row: Blockly.blockRendering.InputRow): void; /** * Add spacers between rows and set their sizes. * @protected */ addRowSpacing_(): void; /** * Create a spacer row to go between prev and next, and set its size. * @param {!Blockly.blockRendering.Row} prev The previous row. * @param {!Blockly.blockRendering.Row} next The next row. * @return {!Blockly.blockRendering.SpacerRow} The newly created spacer row. * @protected */ makeSpacerRow_(prev: Blockly.blockRendering.Row, next: Blockly.blockRendering.Row): Blockly.blockRendering.SpacerRow; /** * Calculate the width of a spacer row. * @param {!Blockly.blockRendering.Row} _prev The row before the spacer. * @param {!Blockly.blockRendering.Row} _next The row after the spacer. * @return {number} The desired width of the spacer row between these two rows. * @protected */ getSpacerRowWidth_(_prev: Blockly.blockRendering.Row, _next: Blockly.blockRendering.Row): number; /** * Calculate the height of a spacer row. * @param {!Blockly.blockRendering.Row} _prev The row before the spacer. * @param {!Blockly.blockRendering.Row} _next The row after the spacer. * @return {number} The desired height of the spacer row between these two rows. * @protected */ getSpacerRowHeight_(_prev: Blockly.blockRendering.Row, _next: Blockly.blockRendering.Row): number; /** * Calculate the centerline of an element in a rendered row. * This base implementation puts the centerline at the middle of the row * vertically, with no special cases. You will likely need extra logic to * handle (at minimum) top and bottom rows. 
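 *
 * A hypothetical refinement (sketch only, not the shipped implementation):
 * centre bottom-row elements within the part of the row that sits above the
 * descender, and centre everything else within the full row height.
 *
 *     getElemCenterline_(row, elem) {
 *       if (Blockly.blockRendering.Types.isBottomRow(row)) {
 *         return row.yPos + (row.height - row.descenderHeight) / 2;
 *       }
 *       return row.yPos + row.height / 2;
 *     }
 *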
* @param {!Blockly.blockRendering.Row} row The row containing the element. * @param {!Blockly.blockRendering.Measurable} elem The element to place. * @return {number} The desired centerline of the given element, as an offset * from the top left of the block. * @protected */ getElemCenterline_(row: Blockly.blockRendering.Row, elem: Blockly.blockRendering.Measurable): number; /** * Make any final changes to the rendering information object. In particular, * store the y position of each row, and record the height of the full block. * @protected */ finalize_(): void; } } declare module Blockly.blockRendering { interface IPathObject { } class PathObject extends PathObject__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class PathObject__Class implements Blockly.blockRendering.IPathObject { /** * An object that handles creating and setting each of the SVG elements * used by the renderer. * @param {!SVGElement} root The root SVG element. * @constructor * @implements {Blockly.blockRendering.IPathObject} * @package */ constructor(root: SVGElement); /** * The primary path of the block. * @type {SVGElement} * @package */ svgPath: SVGElement; /** * The light path of the block. * @type {SVGElement} * @package */ svgPathLight: SVGElement; /** * The dark path of the block. * @type {SVGElement} * @package */ svgPathDark: SVGElement; /** * Set the path generated by the renderer onto the respective SVG element. * @param {string} pathString The path. * @package */ setPaths(pathString: string): void; /** * Flip the SVG paths in RTL. * @package */ flipRTL(): void; } } declare module Blockly.blockRendering { class Renderer extends Renderer__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Renderer__Class { /** * The base class for a block renderer. * @package * @constructor */ constructor(); /** * Initialize the renderer. * @package */ init(): void; /** * Create a new instance of the renderer's constant provider. * @return {!Blockly.blockRendering.ConstantProvider} The constant provider. * @protected */ makeConstants_(): Blockly.blockRendering.ConstantProvider; /** * Create a new instance of the renderer's render info object. * @param {!Blockly.BlockSvg} block The block to measure. * @return {!Blockly.blockRendering.RenderInfo} The render info object. * @protected */ makeRenderInfo_(block: Blockly.BlockSvg): Blockly.blockRendering.RenderInfo; /** * Create a new instance of the renderer's drawer. * @param {!Blockly.BlockSvg} block The block to render. * @param {!Blockly.blockRendering.RenderInfo} info An object containing all * information needed to render this block. * @return {!Blockly.blockRendering.Drawer} The drawer. * @protected */ makeDrawer_(block: Blockly.BlockSvg, info: Blockly.blockRendering.RenderInfo): Blockly.blockRendering.Drawer; /** * Create a new instance of the renderer's debugger. * @return {!Blockly.blockRendering.Debug} The renderer debugger. * @protected */ makeDebugger_(): Blockly.blockRendering.Debug; /** * Create a new instance of the renderer's cursor drawer. * @param {!Blockly.WorkspaceSvg} workspace The workspace the cursor belongs to. * @param {boolean=} opt_marker True if the cursor is a marker. A marker is used * to save a location and is an immovable cursor. False or undefined if the * cursor is not a marker. * @return {!Blockly.CursorSvg} The cursor drawer. 
* @package */ makeCursorDrawer(workspace: Blockly.WorkspaceSvg, opt_marker?: boolean): Blockly.CursorSvg; /** * Create a new instance of a renderer path object. * @param {!SVGElement} root The root SVG element. * @return {!Blockly.blockRendering.IPathObject} The renderer path object. * @package */ makePathObject(root: SVGElement): Blockly.blockRendering.IPathObject; /** * Get the current renderer's constant provider. We assume that when this is * called, the renderer has already been initialized. * @return {!Blockly.blockRendering.ConstantProvider} The constant provider. * @package */ getConstants(): Blockly.blockRendering.ConstantProvider; /** * Render the block. * @param {!Blockly.BlockSvg} block The block to render. * @package */ render(block: Blockly.BlockSvg): void; } } declare module Blockly.blockRendering { class Measurable extends Measurable__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Measurable__Class { /** * The base class to represent a part of a block that takes up space during * rendering. The constructor for each non-spacer Measurable records the size * of the block element (e.g. field, statement input). * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @package * @constructor */ constructor(constants: Blockly.blockRendering.ConstantProvider); /** * The renderer's constant provider. * @type {!Blockly.blockRendering.ConstantProvider} * @protected */ constants_: Blockly.blockRendering.ConstantProvider; } } declare module Blockly.blockRendering { class Connection extends Connection__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Connection__Class extends Blockly.blockRendering.Measurable__Class { /** * The base class to represent a connection and the space that it takes up on * the block. * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @param {Blockly.RenderedConnection} connectionModel The connection object on * the block that this represents. * @package * @constructor * @extends {Blockly.blockRendering.Measurable} */ constructor(constants: Blockly.blockRendering.ConstantProvider, connectionModel: Blockly.RenderedConnection); } class OutputConnection extends OutputConnection__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class OutputConnection__Class extends Blockly.blockRendering.Connection__Class { /** * An object containing information about the space an output connection takes * up during rendering. * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @param {Blockly.RenderedConnection} connectionModel The connection object on * the block that this represents. * @package * @constructor * @extends {Blockly.blockRendering.Connection} */ constructor(constants: Blockly.blockRendering.ConstantProvider, connectionModel: Blockly.RenderedConnection); /** * Whether or not the connection shape is dynamic. Dynamic shapes get their * height from the block. * @return {boolean} True if the connection shape is dynamic. */ isDynamic(): boolean; } class PreviousConnection extends PreviousConnection__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class PreviousConnection__Class extends Blockly.blockRendering.Connection__Class { /** * An object containing information about the space a previous connection takes * up during rendering. 
* @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @param {Blockly.RenderedConnection} connectionModel The connection object on * the block that this represents. * @package * @constructor * @extends {Blockly.blockRendering.Connection} */ constructor(constants: Blockly.blockRendering.ConstantProvider, connectionModel: Blockly.RenderedConnection); } class NextConnection extends NextConnection__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class NextConnection__Class extends Blockly.blockRendering.Connection__Class { /** * An object containing information about the space a next connection takes * up during rendering. * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @param {Blockly.RenderedConnection} connectionModel The connection object on * the block that this represents. * @package * @constructor * @extends {Blockly.blockRendering.Connection} */ constructor(constants: Blockly.blockRendering.ConstantProvider, connectionModel: Blockly.RenderedConnection); } } declare module Blockly.blockRendering { class InputConnection extends InputConnection__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class InputConnection__Class extends Blockly.blockRendering.Connection__Class { /** * The base class to represent an input that takes up space on a block * during rendering * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @param {!Blockly.Input} input The input to measure and store information for. * @package * @constructor * @extends {Blockly.blockRendering.Connection} */ constructor(constants: Blockly.blockRendering.ConstantProvider, input: Blockly.Input); } class InlineInput extends InlineInput__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class InlineInput__Class extends Blockly.blockRendering.InputConnection__Class { /** * An object containing information about the space an inline input takes up * during rendering * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @param {!Blockly.Input} input The inline input to measure and store * information for. * @package * @constructor * @extends {Blockly.blockRendering.InputConnection} */ constructor(constants: Blockly.blockRendering.ConstantProvider, input: Blockly.Input); } class StatementInput extends StatementInput__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class StatementInput__Class extends Blockly.blockRendering.InputConnection__Class { /** * An object containing information about the space a statement input takes up * during rendering * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @param {!Blockly.Input} input The statement input to measure and store * information for. 
* @package * @constructor * @extends {Blockly.blockRendering.InputConnection} */ constructor(constants: Blockly.blockRendering.ConstantProvider, input: Blockly.Input); } class ExternalValueInput extends ExternalValueInput__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class ExternalValueInput__Class extends Blockly.blockRendering.InputConnection__Class { /** * An object containing information about the space an external value input * takes up during rendering * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @param {!Blockly.Input} input The external value input to measure and store * information for. * @package * @constructor * @extends {Blockly.blockRendering.InputConnection} */ constructor(constants: Blockly.blockRendering.ConstantProvider, input: Blockly.Input); } } declare module Blockly.blockRendering { class Icon extends Icon__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Icon__Class extends Blockly.blockRendering.Measurable__Class { /** * An object containing information about the space an icon takes up during * rendering * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @param {!Blockly.Icon} icon The icon to measure and store information for. * @package * @constructor * @extends {Blockly.blockRendering.Measurable} */ constructor(constants: Blockly.blockRendering.ConstantProvider, icon: Blockly.Icon); } class JaggedEdge extends JaggedEdge__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class JaggedEdge__Class extends Blockly.blockRendering.Measurable__Class { /** * An object containing information about the space the jagged edge of a * collapsed block takes up during rendering * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @package * @constructor * @extends {Blockly.blockRendering.Measurable} */ constructor(constants: Blockly.blockRendering.ConstantProvider); } class Field extends Field__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Field__Class extends Blockly.blockRendering.Measurable__Class { /** * An object containing information about the space a field takes up during * rendering * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @param {!Blockly.Field} field The field to measure and store information for. * @param {!Blockly.Input} parentInput The parent input for the field. * @package * @constructor * @extends {Blockly.blockRendering.Measurable} */ constructor(constants: Blockly.blockRendering.ConstantProvider, field: Blockly.Field, parentInput: Blockly.Input); } class Hat extends Hat__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Hat__Class extends Blockly.blockRendering.Measurable__Class { /** * An object containing information about the space a hat takes up during * rendering. * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider.
* @package * @constructor * @extends {Blockly.blockRendering.Measurable} */ constructor(constants: Blockly.blockRendering.ConstantProvider); } class SquareCorner extends SquareCorner__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class SquareCorner__Class extends Blockly.blockRendering.Measurable__Class { /** * An object containing information about the space a square corner takes up * during rendering. * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @param {string=} opt_position The position of this corner. * @package * @constructor * @extends {Blockly.blockRendering.Measurable} */ constructor(constants: Blockly.blockRendering.ConstantProvider, opt_position?: string); } class RoundCorner extends RoundCorner__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class RoundCorner__Class extends Blockly.blockRendering.Measurable__Class { /** * An object containing information about the space a rounded corner takes up * during rendering. * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @param {string=} opt_position The position of this corner. * @package * @constructor * @extends {Blockly.blockRendering.Measurable} */ constructor(constants: Blockly.blockRendering.ConstantProvider, opt_position?: string); } class InRowSpacer extends InRowSpacer__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class InRowSpacer__Class extends Blockly.blockRendering.Measurable__Class { /** * An object containing information about a spacer between two elements on a * row. * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @param {number} width The width of the spacer. * @package * @constructor * @extends {Blockly.blockRendering.Measurable} */ constructor(constants: Blockly.blockRendering.ConstantProvider, width: number); } } declare module Blockly.blockRendering { class Row extends Row__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class Row__Class { /** * An object representing a single row on a rendered block and all of its * subcomponents. * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @package * @constructor */ constructor(constants: Blockly.blockRendering.ConstantProvider); /** * The type of this rendering object. * @package * @type {number} */ type: number; /** * An array of elements contained in this row. * @package * @type {!Array.<!Blockly.blockRendering.Measurable>} */ elements: Blockly.blockRendering.Measurable[]; /** * The height of the row. * @package * @type {number} */ height: number; /** * The width of the row, from the left edge of the block to the right. * Does not include child blocks unless they are inline. * @package * @type {number} */ width: number; /** * The minimum height of the row. * @package * @type {number} */ minHeight: number; /** * The minimum width of the row, from the left edge of the block to the right. * Does not include child blocks unless they are inline. * @package * @type {number} */ minWidth: number; /** * The width of the row, from the left edge of the block to the edge of the * block or any connected child blocks. * @package * @type {number} */ widthWithConnectedBlocks: number; /** * The Y position of the row relative to the origin of the block's svg group. 
* @package * @type {number} */ yPos: number; /** * The X position of the row relative to the origin of the block's svg group. * @package * @type {number} */ xPos: number; /** * Whether the row has any external inputs. * @package * @type {boolean} */ hasExternalInput: boolean; /** * Whether the row has any statement inputs. * @package * @type {boolean} */ hasStatement: boolean; /** * Whether the row has any inline inputs. * @package * @type {boolean} */ hasInlineInput: boolean; /** * Whether the row has any dummy inputs. * @package * @type {boolean} */ hasDummyInput: boolean; /** * Whether the row has a jagged edge. * @package * @type {boolean} */ hasJaggedEdge: boolean; /** * The renderer's constant provider. * @type {!Blockly.blockRendering.ConstantProvider} * @protected */ constants_: Blockly.blockRendering.ConstantProvider; /** * Inspect all subcomponents and populate all size properties on the row. * @package */ measure(): void; /** * Get the last input on this row, if it has one. * @return {Blockly.blockRendering.InputConnection} The last input on the row, * or null. * @package */ getLastInput(): Blockly.blockRendering.InputConnection; /** * Determines whether this row should start with an element spacer. * @return {boolean} Whether the row should start with a spacer. * @package */ startsWithElemSpacer(): boolean; /** * Determines whether this row should end with an element spacer. * @return {boolean} Whether the row should end with a spacer. * @package */ endsWithElemSpacer(): boolean; /** * Convenience method to get the first spacer element on this row. * @return {Blockly.blockRendering.InRowSpacer} The first spacer element on * this row. * @package */ getFirstSpacer(): Blockly.blockRendering.InRowSpacer; /** * Convenience method to get the last spacer element on this row. * @return {Blockly.blockRendering.InRowSpacer} The last spacer element on * this row. * @package */ getLastSpacer(): Blockly.blockRendering.InRowSpacer; } class TopRow extends TopRow__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class TopRow__Class extends Blockly.blockRendering.Row__Class { /** * An object containing information about what elements are in the top row of a * block as well as sizing information for the top row. * Elements in a top row can consist of corners, hats, spacers, and previous * connections. * After this constructor is called, the row will contain all non-spacer * elements it needs. * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @package * @constructor * @extends {Blockly.blockRendering.Row} */ constructor(constants: Blockly.blockRendering.ConstantProvider); /** * The starting point for drawing the row, in the y direction. * This allows us to draw hats and similar shapes that don't start at the * origin. Must be non-negative (see #2820). * @package * @type {number} */ capline: number; /** * How much the row extends up above its capline. * @type {number} */ ascenderHeight: number; /** * Whether the block has a previous connection. * @package * @type {boolean} */ hasPreviousConnection: boolean; /** * The previous connection on the block, if any. * @type {Blockly.blockRendering.PreviousConnection} */ connection: Blockly.blockRendering.PreviousConnection; /** * Returns whether or not the top row has a left square corner. * @param {!Blockly.BlockSvg} block The block whose top row this represents. * @returns {boolean} Whether or not the top row has a left square corner. 
*/ hasLeftSquareCorner(block: Blockly.BlockSvg): boolean; } class BottomRow extends BottomRow__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class BottomRow__Class extends Blockly.blockRendering.Row__Class { /** * An object containing information about what elements are in the bottom row of * a block as well as spacing information for the bottom row. * Elements in a bottom row can consist of corners, spacers and next * connections. * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @package * @constructor * @extends {Blockly.blockRendering.Row} */ constructor(constants: Blockly.blockRendering.ConstantProvider); /** * Whether this row has a next connection. * @package * @type {boolean} */ hasNextConnection: boolean; /** * The next connection on the row, if any. * @package * @type {Blockly.blockRendering.NextConnection} */ connection: Blockly.blockRendering.NextConnection; /** * The amount that the bottom of the block extends below the horizontal edge, * e.g. because of a next connection. Must be non-negative (see #2820). * @package * @type {number} */ descenderHeight: number; /** * The Y position of the bottom edge of the block, relative to the origin * of the block rendering. * @type {number} */ baseline: number; /** * Returns whether or not the bottom row has a left square corner. * @param {!Blockly.BlockSvg} block The block whose bottom row this represents. * @returns {boolean} Whether or not the bottom row has a left square corner. */ hasLeftSquareCorner(block: Blockly.BlockSvg): boolean; } class SpacerRow extends SpacerRow__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class SpacerRow__Class extends Blockly.blockRendering.Row__Class { /** * An object containing information about a spacer between two rows. * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @param {number} height The height of the spacer. * @param {number} width The width of the spacer. * @package * @constructor * @extends {Blockly.blockRendering.Row} */ constructor(constants: Blockly.blockRendering.ConstantProvider, height: number, width: number); } class InputRow extends InputRow__Class { } /** Fake class which should be extended to avoid inheriting static properties */ class InputRow__Class extends Blockly.blockRendering.Row__Class { /** * An object containing information about a row that holds one or more inputs. * @param {!Blockly.blockRendering.ConstantProvider} constants The rendering * constants provider. * @package * @constructor * @extends {Blockly.blockRendering.Row} */ constructor(constants: Blockly.blockRendering.ConstantProvider); /** * The total width of all blocks connected to this row. * @type {number} * @package */ connectedBlockWidths: number; /** * Inspect all subcomponents and populate all size properties on the row. * @package */ measure(): void; } } declare module Blockly.blockRendering { /** * Types of rendering elements. * @enum {number} * @package */ enum Types { NONE, FIELD, HAT, ICON, SPACER, BETWEEN_ROW_SPACER, IN_ROW_SPACER, EXTERNAL_VALUE_INPUT, INPUT, INLINE_INPUT, STATEMENT_INPUT, CONNECTION, PREVIOUS_CONNECTION, NEXT_CONNECTION, OUTPUT_CONNECTION, CORNER, LEFT_SQUARE_CORNER, LEFT_ROUND_CORNER, RIGHT_SQUARE_CORNER, RIGHT_ROUND_CORNER, JAGGED_EDGE, ROW, TOP_ROW, BOTTOM_ROW, INPUT_ROW } } declare module Blockly.blockRendering.Types { /** * A Left Corner Union Type.
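 *
 * Union types combine the flags of the types they cover, so (assuming a
 * measurable exposes its type flags the same way rows do) a single bitwise
 * test matches both square and round left corners:
 *
 *     if (cornerElem.type & Blockly.blockRendering.Types.LEFT_CORNER) {
 *       // cornerElem is a left corner, square or round.
 *     }
 *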
* @type {number} * @const * @package */ var LEFT_CORNER: number; /** * A Right Corner Union Type. * @type {number} * @const * @package */ var RIGHT_CORNER: number; /** * Get the enum flag value of an existing type or register a new type. * @param {!string} type The name of the type. * @return {!number} The enum flag value associated with that type. * @package */ function getType(type: string): number; /** * Whether a measurable stores information about a field. * @param {!Blockly.blockRendering.Measurable} elem The element to check. * @return {number} 1 if the object stores information about a field. * @package */ function isField(elem: Blockly.blockRendering.Measurable): number; /** * Whether a measurable stores information about a hat. * @param {!Blockly.blockRendering.Measurable} elem The element to check. * @return {number} 1 if the object stores information about a hat. * @package */ function isHat(elem: Blockly.blockRendering.Measurable): number; /** * Whether a measurable stores information about an icon. * @param {!Blockly.blockRendering.Measurable} elem The element to check. * @return {number} 1 if the object stores information about an icon. * @package */ function isIcon(elem: Blockly.blockRendering.Measurable): number; /** * Whether a measurable stores information about a spacer. * @param {!Blockly.blockRendering.Measurable} elem The element to check. * @return {number} 1 if the object stores information about a spacer. * @package */ function isSpacer(elem: Blockly.blockRendering.Measurable): number; /** * Whether a measurable stores information about an in-row spacer. * @param {!Blockly.blockRendering.Measurable} elem The element to check. * @return {number} 1 if the object stores information about an * in-row spacer. * @package */ function isInRowSpacer(elem: Blockly.blockRendering.Measurable): number; /** * Whether a measurable stores information about an input. * @param {!Blockly.blockRendering.Measurable} elem The element to check. * @return {number} 1 if the object stores information about an input. * @package */ function isInput(elem: Blockly.blockRendering.Measurable): number; /** * Whether a measurable stores information about an external input. * @param {!Blockly.blockRendering.Measurable} elem The element to check. * @return {number} 1 if the object stores information about an * external input. * @package */ function isExternalInput(elem: Blockly.blockRendering.Measurable): number; /** * Whether a measurable stores information about an inline input. * @param {!Blockly.blockRendering.Measurable} elem The element to check. * @return {number} 1 if the object stores information about an * inline input. * @package */ function isInlineInput(elem: Blockly.blockRendering.Measurable): number; /** * Whether a measurable stores information about a statement input. * @param {!Blockly.blockRendering.Measurable} elem The element to check. * @return {number} 1 if the object stores information about a * statement input. * @package */ function isStatementInput(elem: Blockly.blockRendering.Measurable): number; /** * Whether a measurable stores information about a previous connection. * @param {!Blockly.blockRendering.Measurable} elem The element to check. * @return {number} 1 if the object stores information about a * previous connection. * @package */ function isPreviousConnection(elem: Blockly.blockRendering.Measurable): number; /** * Whether a measurable stores information about a next connection. * @param {!Blockly.blockRendering.Measurable} elem The element to check.
* @return {number} 1 if the object stores information about a * next connection. * @package */ function isNextConnection(elem: Blockly.blockRendering.Measurable): number; /** * Whether a measurable stores information about a previous or next connection. * @param {!Blockly.blockRendering.Measurable} elem The element to check. * @return {number} 1 if the object stores information about a previous or * next connection. * @package */ function isPreviousOrNextConnection(elem: Blockly.blockRendering.Measurable): number; /** * Whether a measurable stores information about a left round corner. * @param {!Blockly.blockRendering.Measurable} elem The element to check. * @return {number} 1 if the object stores information about a * left round corner. * @package */ function isLeftRoundedCorner(elem: Blockly.blockRendering.Measurable): number; /** * Whether a measurable stores information about a right round corner. * @param {!Blockly.blockRendering.Measurable} elem The element to check. * @return {number} 1 if the object stores information about a * right round corner. * @package */ function isRightRoundedCorner(elem: Blockly.blockRendering.Measurable): number; /** * Whether a measurable stores information about a left square corner. * @param {!Blockly.blockRendering.Measurable} elem The element to check. * @return {number} 1 if the object stores information about a * left square corner. * @package */ function isLeftSquareCorner(elem: Blockly.blockRendering.Measurable): number; /** * Whether a measurable stores information about a right square corner. * @param {!Blockly.blockRendering.Measurable} elem The element to check. * @return {number} 1 if the object stores information about a * right square corner. * @package */ function isRightSquareCorner(elem: Blockly.blockRendering.Measurable): number; /** * Whether a measurable stores information about a corner. * @param {!Blockly.blockRendering.Measurable} elem The element to check. * @return {number} 1 if the object stores information about a * corner. * @package */ function isCorner(elem: Blockly.blockRendering.Measurable): number; /** * Whether a measurable stores information about a jagged edge. * @param {!Blockly.blockRendering.Measurable} elem The element to check. * @return {number} 1 if the object stores information about a jagged edge. * @package */ function isJaggedEdge(elem: Blockly.blockRendering.Measurable): number; /** * Whether a measurable stores information about a row. * @param {!Blockly.blockRendering.Row} row The row to check. * @return {number} 1 if the object stores information about a row. * @package */ function isRow(row: Blockly.blockRendering.Row): number; /** * Whether a measurable stores information about a between-row spacer. * @param {!Blockly.blockRendering.Row} row The row to check. * @return {number} 1 if the object stores information about a * between-row spacer. * @package */ function isBetweenRowSpacer(row: Blockly.blockRendering.Row): number; /** * Whether a measurable stores information about a top row. * @param {!Blockly.blockRendering.Row} row The row to check. * @return {number} 1 if the object stores information about a top row. * @package */ function isTopRow(row: Blockly.blockRendering.Row): number; /** * Whether a measurable stores information about a bottom row. * @param {!Blockly.blockRendering.Row} row The row to check. * @return {number} 1 if the object stores information about a bottom row. 
* @package */ function isBottomRow(row: Blockly.blockRendering.Row): number; /** * Whether a measurable stores information about a top or bottom row. * @param {!Blockly.blockRendering.Row} row The row to check. * @return {number} 1 if the object stores information about a top or * bottom row. * @package */ function isTopOrBottomRow(row: Blockly.blockRendering.Row): number; /** * Whether a measurable stores information about an input row. * @param {!Blockly.blockRendering.Row} row The row to check. * @return {number} 1 if the object stores information about an input row. * @package */ function isInputRow(row: Blockly.blockRendering.Row): number; } declare module Blockly.Msg { /** @type {string} */ var LOGIC_HUE: string; /** @type {string} */ var LOOPS_HUE: string; /** @type {string} */ var MATH_HUE: string; /** @type {string} */ var TEXTS_HUE: string; /** @type {string} */ var LISTS_HUE: string; /** @type {string} */ var COLOUR_HUE: string; /** @type {string} */ var VARIABLES_HUE: string; /** @type {string} */ var VARIABLES_DYNAMIC_HUE: string; /** @type {string} */ var PROCEDURES_HUE: string; /** @type {string} */ var VARIABLES_DEFAULT_NAME: string; /** @type {string} */ var UNNAMED_KEY: string; /** @type {string} */ var TODAY: string; /** @type {string} */ var DUPLICATE_BLOCK: string; /** @type {string} */ var ADD_COMMENT: string; /** @type {string} */ var REMOVE_COMMENT: string; /** @type {string} */ var DUPLICATE_COMMENT: string; /** @type {string} */ var EXTERNAL_INPUTS: string; /** @type {string} */ var INLINE_INPUTS: string; /** @type {string} */ var DELETE_BLOCK: string; /** @type {string} */ var DELETE_X_BLOCKS: string; /** @type {string} */ var DELETE_ALL_BLOCKS: string; /** @type {string} */ var CLEAN_UP: string; /** @type {string} */ var COLLAPSE_BLOCK: string; /** @type {string} */ var COLLAPSE_ALL: string; /** @type {string} */ var EXPAND_BLOCK: string; /** @type {string} */ var EXPAND_ALL: string; /** @type {string} */ var DISABLE_BLOCK: string; /** @type {string} */ var ENABLE_BLOCK: string; /** @type {string} */ var HELP: string; /** @type {string} */ var UNDO: string; /** @type {string} */ var REDO: string; /** @type {string} */ var CHANGE_VALUE_TITLE: string; /** @type {string} */ var RENAME_VARIABLE: string; /** @type {string} */ var RENAME_VARIABLE_TITLE: string; /** @type {string} */ var NEW_VARIABLE: string; /** @type {string} */ var NEW_STRING_VARIABLE: string; /** @type {string} */ var NEW_NUMBER_VARIABLE: string; /** @type {string} */ var NEW_COLOUR_VARIABLE: string; /** @type {string} */ var NEW_VARIABLE_TYPE_TITLE: string; /** @type {string} */ var NEW_VARIABLE_TITLE: string; /** @type {string} */ var VARIABLE_ALREADY_EXISTS: string; /** @type {string} */ var VARIABLE_ALREADY_EXISTS_FOR_ANOTHER_TYPE: string; /** @type {string} */ var DELETE_VARIABLE_CONFIRMATION: string; /** @type {string} */ var CANNOT_DELETE_VARIABLE_PROCEDURE: string; /** @type {string} */ var DELETE_VARIABLE: string; /** @type {string} */ var COLOUR_PICKER_HELPURL: string; /** @type {string} */ var COLOUR_PICKER_TOOLTIP: string; /** @type {string} */ var COLOUR_RANDOM_HELPURL: string; /** @type {string} */ var COLOUR_RANDOM_TITLE: string; /** @type {string} */ var COLOUR_RANDOM_TOOLTIP: string; /** @type {string} */ var COLOUR_RGB_HELPURL: string; /** @type {string} */ var COLOUR_RGB_TITLE: string; /** @type {string} */ var COLOUR_RGB_RED: string; /** @type {string} */ var COLOUR_RGB_GREEN: string; /** @type {string} */ var COLOUR_RGB_BLUE: string; /** @type {string} */ var 
COLOUR_RGB_TOOLTIP: string; /** @type {string} */ var COLOUR_BLEND_HELPURL: string; /** @type {string} */ var COLOUR_BLEND_TITLE: string; /** @type {string} */ var COLOUR_BLEND_COLOUR1: string; /** @type {string} */ var COLOUR_BLEND_COLOUR2: string; /** @type {string} */ var COLOUR_BLEND_RATIO: string; /** @type {string} */ var COLOUR_BLEND_TOOLTIP: string; /** @type {string} */ var CONTROLS_REPEAT_HELPURL: string; /** @type {string} */ var CONTROLS_REPEAT_TITLE: string; /** @type {string} */ var CONTROLS_REPEAT_INPUT_DO: string; /** @type {string} */ var CONTROLS_REPEAT_TOOLTIP: string; /** @type {string} */ var CONTROLS_WHILEUNTIL_HELPURL: string; /** @type {string} */ var CONTROLS_WHILEUNTIL_INPUT_DO: string; /** @type {string} */ var CONTROLS_WHILEUNTIL_OPERATOR_WHILE: string; /** @type {string} */ var CONTROLS_WHILEUNTIL_OPERATOR_UNTIL: string; /** @type {string} */ var CONTROLS_WHILEUNTIL_TOOLTIP_WHILE: string; /** @type {string} */ var CONTROLS_WHILEUNTIL_TOOLTIP_UNTIL: string; /** @type {string} */ var CONTROLS_FOR_HELPURL: string; /** @type {string} */ var CONTROLS_FOR_TOOLTIP: string; /** @type {string} */ var CONTROLS_FOR_TITLE: string; /** @type {string} */ var CONTROLS_FOR_INPUT_DO: string; /** @type {string} */ var CONTROLS_FOREACH_HELPURL: string; /** @type {string} */ var CONTROLS_FOREACH_TITLE: string; /** @type {string} */ var CONTROLS_FOREACH_INPUT_DO: string; /** @type {string} */ var CONTROLS_FOREACH_TOOLTIP: string; /** @type {string} */ var CONTROLS_FLOW_STATEMENTS_HELPURL: string; /** @type {string} */ var CONTROLS_FLOW_STATEMENTS_OPERATOR_BREAK: string; /** @type {string} */ var CONTROLS_FLOW_STATEMENTS_OPERATOR_CONTINUE: string; /** @type {string} */ var CONTROLS_FLOW_STATEMENTS_TOOLTIP_BREAK: string; /** @type {string} */ var CONTROLS_FLOW_STATEMENTS_TOOLTIP_CONTINUE: string; /** @type {string} */ var CONTROLS_FLOW_STATEMENTS_WARNING: string; /** @type {string} */ var CONTROLS_IF_HELPURL: string; /** @type {string} */ var CONTROLS_IF_TOOLTIP_1: string; /** @type {string} */ var CONTROLS_IF_TOOLTIP_2: string; /** @type {string} */ var CONTROLS_IF_TOOLTIP_3: string; /** @type {string} */ var CONTROLS_IF_TOOLTIP_4: string; /** @type {string} */ var CONTROLS_IF_MSG_IF: string; /** @type {string} */ var CONTROLS_IF_MSG_ELSEIF: string; /** @type {string} */ var CONTROLS_IF_MSG_ELSE: string; /** @type {string} */ var CONTROLS_IF_MSG_THEN: string; /** @type {string} */ var CONTROLS_IF_IF_TITLE_IF: string; /** @type {string} */ var CONTROLS_IF_IF_TOOLTIP: string; /** @type {string} */ var CONTROLS_IF_ELSEIF_TITLE_ELSEIF: string; /** @type {string} */ var CONTROLS_IF_ELSEIF_TOOLTIP: string; /** @type {string} */ var CONTROLS_IF_ELSE_TITLE_ELSE: string; /** @type {string} */ var CONTROLS_IF_ELSE_TOOLTIP: string; /** @type {string} */ var IOS_OK: string; /** @type {string} */ var IOS_CANCEL: string; /** @type {string} */ var IOS_ERROR: string; /** @type {string} */ var IOS_PROCEDURES_INPUTS: string; /** @type {string} */ var IOS_PROCEDURES_ADD_INPUT: string; /** @type {string} */ var IOS_PROCEDURES_ALLOW_STATEMENTS: string; /** @type {string} */ var IOS_PROCEDURES_DUPLICATE_INPUTS_ERROR: string; /** @type {string} */ var IOS_VARIABLES_ADD_VARIABLE: string; /** @type {string} */ var IOS_VARIABLES_ADD_BUTTON: string; /** @type {string} */ var IOS_VARIABLES_RENAME_BUTTON: string; /** @type {string} */ var IOS_VARIABLES_DELETE_BUTTON: string; /** @type {string} */ var IOS_VARIABLES_VARIABLE_NAME: string; /** @type {string} */ var IOS_VARIABLES_EMPTY_NAME_ERROR: string; /** @type 
{string} */ var LOGIC_COMPARE_HELPURL: string; /** @type {string} */ var LOGIC_COMPARE_TOOLTIP_EQ: string; /** @type {string} */ var LOGIC_COMPARE_TOOLTIP_NEQ: string; /** @type {string} */ var LOGIC_COMPARE_TOOLTIP_LT: string; /** @type {string} */ var LOGIC_COMPARE_TOOLTIP_LTE: string; /** @type {string} */ var LOGIC_COMPARE_TOOLTIP_GT: string; /** @type {string} */ var LOGIC_COMPARE_TOOLTIP_GTE: string; /** @type {string} */ var LOGIC_OPERATION_HELPURL: string; /** @type {string} */ var LOGIC_OPERATION_TOOLTIP_AND: string; /** @type {string} */ var LOGIC_OPERATION_AND: string; /** @type {string} */ var LOGIC_OPERATION_TOOLTIP_OR: string; /** @type {string} */ var LOGIC_OPERATION_OR: string; /** @type {string} */ var LOGIC_NEGATE_HELPURL: string; /** @type {string} */ var LOGIC_NEGATE_TITLE: string; /** @type {string} */ var LOGIC_NEGATE_TOOLTIP: string; /** @type {string} */ var LOGIC_BOOLEAN_HELPURL: string; /** @type {string} */ var LOGIC_BOOLEAN_TRUE: string; /** @type {string} */ var LOGIC_BOOLEAN_FALSE: string; /** @type {string} */ var LOGIC_BOOLEAN_TOOLTIP: string; /** @type {string} */ var LOGIC_NULL_HELPURL: string; /** @type {string} */ var LOGIC_NULL: string; /** @type {string} */ var LOGIC_NULL_TOOLTIP: string; /** @type {string} */ var LOGIC_TERNARY_HELPURL: string; /** @type {string} */ var LOGIC_TERNARY_CONDITION: string; /** @type {string} */ var LOGIC_TERNARY_IF_TRUE: string; /** @type {string} */ var LOGIC_TERNARY_IF_FALSE: string; /** @type {string} */ var LOGIC_TERNARY_TOOLTIP: string; /** @type {string} */ var MATH_NUMBER_HELPURL: string; /** @type {string} */ var MATH_NUMBER_TOOLTIP: string; /** @type {string} */ var MATH_ADDITION_SYMBOL: string; /** @type {string} */ var MATH_SUBTRACTION_SYMBOL: string; /** @type {string} */ var MATH_DIVISION_SYMBOL: string; /** @type {string} */ var MATH_MULTIPLICATION_SYMBOL: string; /** @type {string} */ var MATH_POWER_SYMBOL: string; /** @type {string} */ var MATH_TRIG_SIN: string; /** @type {string} */ var MATH_TRIG_COS: string; /** @type {string} */ var MATH_TRIG_TAN: string; /** @type {string} */ var MATH_TRIG_ASIN: string; /** @type {string} */ var MATH_TRIG_ACOS: string; /** @type {string} */ var MATH_TRIG_ATAN: string; /** @type {string} */ var MATH_ARITHMETIC_HELPURL: string; /** @type {string} */ var MATH_ARITHMETIC_TOOLTIP_ADD: string; /** @type {string} */ var MATH_ARITHMETIC_TOOLTIP_MINUS: string; /** @type {string} */ var MATH_ARITHMETIC_TOOLTIP_MULTIPLY: string; /** @type {string} */ var MATH_ARITHMETIC_TOOLTIP_DIVIDE: string; /** @type {string} */ var MATH_ARITHMETIC_TOOLTIP_POWER: string; /** @type {string} */ var MATH_SINGLE_HELPURL: string; /** @type {string} */ var MATH_SINGLE_OP_ROOT: string; /** @type {string} */ var MATH_SINGLE_TOOLTIP_ROOT: string; /** @type {string} */ var MATH_SINGLE_OP_ABSOLUTE: string; /** @type {string} */ var MATH_SINGLE_TOOLTIP_ABS: string; /** @type {string} */ var MATH_SINGLE_TOOLTIP_NEG: string; /** @type {string} */ var MATH_SINGLE_TOOLTIP_LN: string; /** @type {string} */ var MATH_SINGLE_TOOLTIP_LOG10: string; /** @type {string} */ var MATH_SINGLE_TOOLTIP_EXP: string; /** @type {string} */ var MATH_SINGLE_TOOLTIP_POW10: string; /** @type {string} */ var MATH_TRIG_HELPURL: string; /** @type {string} */ var MATH_TRIG_TOOLTIP_SIN: string; /** @type {string} */ var MATH_TRIG_TOOLTIP_COS: string; /** @type {string} */ var MATH_TRIG_TOOLTIP_TAN: string; /** @type {string} */ var MATH_TRIG_TOOLTIP_ASIN: string; /** @type {string} */ var MATH_TRIG_TOOLTIP_ACOS: string; /** @type 
{string} */ var MATH_TRIG_TOOLTIP_ATAN: string; /** @type {string} */ var MATH_CONSTANT_HELPURL: string; /** @type {string} */ var MATH_CONSTANT_TOOLTIP: string; /** @type {string} */ var MATH_IS_EVEN: string; /** @type {string} */ var MATH_IS_ODD: string; /** @type {string} */ var MATH_IS_PRIME: string; /** @type {string} */ var MATH_IS_WHOLE: string; /** @type {string} */ var MATH_IS_POSITIVE: string; /** @type {string} */ var MATH_IS_NEGATIVE: string; /** @type {string} */ var MATH_IS_DIVISIBLE_BY: string; /** @type {string} */ var MATH_IS_TOOLTIP: string; /** @type {string} */ var MATH_CHANGE_HELPURL: string; /** @type {string} */ var MATH_CHANGE_TITLE: string; /** @type {string} */ var MATH_CHANGE_TITLE_ITEM: string; /** @type {string} */ var MATH_CHANGE_TOOLTIP: string; /** @type {string} */ var MATH_ROUND_HELPURL: string; /** @type {string} */ var MATH_ROUND_TOOLTIP: string; /** @type {string} */ var MATH_ROUND_OPERATOR_ROUND: string; /** @type {string} */ var MATH_ROUND_OPERATOR_ROUNDUP: string; /** @type {string} */ var MATH_ROUND_OPERATOR_ROUNDDOWN: string; /** @type {string} */ var MATH_ONLIST_HELPURL: string; /** @type {string} */ var MATH_ONLIST_OPERATOR_SUM: string; /** @type {string} */ var MATH_ONLIST_TOOLTIP_SUM: string; /** @type {string} */ var MATH_ONLIST_OPERATOR_MIN: string; /** @type {string} */ var MATH_ONLIST_TOOLTIP_MIN: string; /** @type {string} */ var MATH_ONLIST_OPERATOR_MAX: string; /** @type {string} */ var MATH_ONLIST_TOOLTIP_MAX: string; /** @type {string} */ var MATH_ONLIST_OPERATOR_AVERAGE: string; /** @type {string} */ var MATH_ONLIST_TOOLTIP_AVERAGE: string; /** @type {string} */ var MATH_ONLIST_OPERATOR_MEDIAN: string; /** @type {string} */ var MATH_ONLIST_TOOLTIP_MEDIAN: string; /** @type {string} */ var MATH_ONLIST_OPERATOR_MODE: string; /** @type {string} */ var MATH_ONLIST_TOOLTIP_MODE: string; /** @type {string} */ var MATH_ONLIST_OPERATOR_STD_DEV: string; /** @type {string} */ var MATH_ONLIST_TOOLTIP_STD_DEV: string; /** @type {string} */ var MATH_ONLIST_OPERATOR_RANDOM: string; /** @type {string} */ var MATH_ONLIST_TOOLTIP_RANDOM: string; /** @type {string} */ var MATH_MODULO_HELPURL: string; /** @type {string} */ var MATH_MODULO_TITLE: string; /** @type {string} */ var MATH_MODULO_TOOLTIP: string; /** @type {string} */ var MATH_CONSTRAIN_HELPURL: string; /** @type {string} */ var MATH_CONSTRAIN_TITLE: string; /** @type {string} */ var MATH_CONSTRAIN_TOOLTIP: string; /** @type {string} */ var MATH_RANDOM_INT_HELPURL: string; /** @type {string} */ var MATH_RANDOM_INT_TITLE: string; /** @type {string} */ var MATH_RANDOM_INT_TOOLTIP: string; /** @type {string} */ var MATH_RANDOM_FLOAT_HELPURL: string; /** @type {string} */ var MATH_RANDOM_FLOAT_TITLE_RANDOM: string; /** @type {string} */ var MATH_RANDOM_FLOAT_TOOLTIP: string; /** @type {string} */ var MATH_ATAN2_HELPURL: string; /** @type {string} */ var MATH_ATAN2_TITLE: string; /** @type {string} */ var MATH_ATAN2_TOOLTIP: string; /** @type {string} */ var TEXT_TEXT_HELPURL: string; /** @type {string} */ var TEXT_TEXT_TOOLTIP: string; /** @type {string} */ var TEXT_JOIN_HELPURL: string; /** @type {string} */ var TEXT_JOIN_TITLE_CREATEWITH: string; /** @type {string} */ var TEXT_JOIN_TOOLTIP: string; /** @type {string} */ var TEXT_CREATE_JOIN_TITLE_JOIN: string; /** @type {string} */ var TEXT_CREATE_JOIN_TOOLTIP: string; /** @type {string} */ var TEXT_CREATE_JOIN_ITEM_TITLE_ITEM: string; /** @type {string} */ var TEXT_CREATE_JOIN_ITEM_TOOLTIP: string; /** @type {string} */ var 
TEXT_APPEND_HELPURL: string; /** @type {string} */ var TEXT_APPEND_TITLE: string; /** @type {string} */ var TEXT_APPEND_VARIABLE: string; /** @type {string} */ var TEXT_APPEND_TOOLTIP: string; /** @type {string} */ var TEXT_LENGTH_HELPURL: string; /** @type {string} */ var TEXT_LENGTH_TITLE: string; /** @type {string} */ var TEXT_LENGTH_TOOLTIP: string; /** @type {string} */ var TEXT_ISEMPTY_HELPURL: string; /** @type {string} */ var TEXT_ISEMPTY_TITLE: string; /** @type {string} */ var TEXT_ISEMPTY_TOOLTIP: string; /** @type {string} */ var TEXT_INDEXOF_HELPURL: string; /** @type {string} */ var TEXT_INDEXOF_TOOLTIP: string; /** @type {string} */ var TEXT_INDEXOF_TITLE: string; /** @type {string} */ var TEXT_INDEXOF_OPERATOR_FIRST: string; /** @type {string} */ var TEXT_INDEXOF_OPERATOR_LAST: string; /** @type {string} */ var TEXT_CHARAT_HELPURL: string; /** @type {string} */ var TEXT_CHARAT_TITLE: string; /** @type {string} */ var TEXT_CHARAT_FROM_START: string; /** @type {string} */ var TEXT_CHARAT_FROM_END: string; /** @type {string} */ var TEXT_CHARAT_FIRST: string; /** @type {string} */ var TEXT_CHARAT_LAST: string; /** @type {string} */ var TEXT_CHARAT_RANDOM: string; /** @type {string} */ var TEXT_CHARAT_TAIL: string; /** @type {string} */ var TEXT_CHARAT_TOOLTIP: string; /** @type {string} */ var TEXT_GET_SUBSTRING_TOOLTIP: string; /** @type {string} */ var TEXT_GET_SUBSTRING_HELPURL: string; /** @type {string} */ var TEXT_GET_SUBSTRING_INPUT_IN_TEXT: string; /** @type {string} */ var TEXT_GET_SUBSTRING_START_FROM_START: string; /** @type {string} */ var TEXT_GET_SUBSTRING_START_FROM_END: string; /** @type {string} */ var TEXT_GET_SUBSTRING_START_FIRST: string; /** @type {string} */ var TEXT_GET_SUBSTRING_END_FROM_START: string; /** @type {string} */ var TEXT_GET_SUBSTRING_END_FROM_END: string; /** @type {string} */ var TEXT_GET_SUBSTRING_END_LAST: string; /** @type {string} */ var TEXT_GET_SUBSTRING_TAIL: string; /** @type {string} */ var TEXT_CHANGECASE_HELPURL: string; /** @type {string} */ var TEXT_CHANGECASE_TOOLTIP: string; /** @type {string} */ var TEXT_CHANGECASE_OPERATOR_UPPERCASE: string; /** @type {string} */ var TEXT_CHANGECASE_OPERATOR_LOWERCASE: string; /** @type {string} */ var TEXT_CHANGECASE_OPERATOR_TITLECASE: string; /** @type {string} */ var TEXT_TRIM_HELPURL: string; /** @type {string} */ var TEXT_TRIM_TOOLTIP: string; /** @type {string} */ var TEXT_TRIM_OPERATOR_BOTH: string; /** @type {string} */ var TEXT_TRIM_OPERATOR_LEFT: string; /** @type {string} */ var TEXT_TRIM_OPERATOR_RIGHT: string; /** @type {string} */ var TEXT_PRINT_HELPURL: string; /** @type {string} */ var TEXT_PRINT_TITLE: string; /** @type {string} */ var TEXT_PRINT_TOOLTIP: string; /** @type {string} */ var TEXT_PROMPT_HELPURL: string; /** @type {string} */ var TEXT_PROMPT_TYPE_TEXT: string; /** @type {string} */ var TEXT_PROMPT_TYPE_NUMBER: string; /** @type {string} */ var TEXT_PROMPT_TOOLTIP_NUMBER: string; /** @type {string} */ var TEXT_PROMPT_TOOLTIP_TEXT: string; /** @type {string} */ var TEXT_COUNT_MESSAGE0: string; /** @type {string} */ var TEXT_COUNT_HELPURL: string; /** @type {string} */ var TEXT_COUNT_TOOLTIP: string; /** @type {string} */ var TEXT_REPLACE_MESSAGE0: string; /** @type {string} */ var TEXT_REPLACE_HELPURL: string; /** @type {string} */ var TEXT_REPLACE_TOOLTIP: string; /** @type {string} */ var TEXT_REVERSE_MESSAGE0: string; /** @type {string} */ var TEXT_REVERSE_HELPURL: string; /** @type {string} */ var TEXT_REVERSE_TOOLTIP: string; /** @type {string} */ var 
LISTS_CREATE_EMPTY_HELPURL: string; /** @type {string} */ var LISTS_CREATE_EMPTY_TITLE: string; /** @type {string} */ var LISTS_CREATE_EMPTY_TOOLTIP: string; /** @type {string} */ var LISTS_CREATE_WITH_HELPURL: string; /** @type {string} */ var LISTS_CREATE_WITH_TOOLTIP: string; /** @type {string} */ var LISTS_CREATE_WITH_INPUT_WITH: string; /** @type {string} */ var LISTS_CREATE_WITH_CONTAINER_TITLE_ADD: string; /** @type {string} */ var LISTS_CREATE_WITH_CONTAINER_TOOLTIP: string; /** @type {string} */ var LISTS_CREATE_WITH_ITEM_TITLE: string; /** @type {string} */ var LISTS_CREATE_WITH_ITEM_TOOLTIP: string; /** @type {string} */ var LISTS_REPEAT_HELPURL: string; /** @type {string} */ var LISTS_REPEAT_TOOLTIP: string; /** @type {string} */ var LISTS_REPEAT_TITLE: string; /** @type {string} */ var LISTS_LENGTH_HELPURL: string; /** @type {string} */ var LISTS_LENGTH_TITLE: string; /** @type {string} */ var LISTS_LENGTH_TOOLTIP: string; /** @type {string} */ var LISTS_ISEMPTY_HELPURL: string; /** @type {string} */ var LISTS_ISEMPTY_TITLE: string; /** @type {string} */ var LISTS_ISEMPTY_TOOLTIP: string; /** @type {string} */ var LISTS_INLIST: string; /** @type {string} */ var LISTS_INDEX_OF_HELPURL: string; /** @type {string} */ var LISTS_INDEX_OF_INPUT_IN_LIST: string; /** @type {string} */ var LISTS_INDEX_OF_FIRST: string; /** @type {string} */ var LISTS_INDEX_OF_LAST: string; /** @type {string} */ var LISTS_INDEX_OF_TOOLTIP: string; /** @type {string} */ var LISTS_GET_INDEX_HELPURL: string; /** @type {string} */ var LISTS_GET_INDEX_GET: string; /** @type {string} */ var LISTS_GET_INDEX_GET_REMOVE: string; /** @type {string} */ var LISTS_GET_INDEX_REMOVE: string; /** @type {string} */ var LISTS_GET_INDEX_FROM_START: string; /** @type {string} */ var LISTS_GET_INDEX_FROM_END: string; /** @type {string} */ var LISTS_GET_INDEX_FIRST: string; /** @type {string} */ var LISTS_GET_INDEX_LAST: string; /** @type {string} */ var LISTS_GET_INDEX_RANDOM: string; /** @type {string} */ var LISTS_GET_INDEX_TAIL: string; /** @type {string} */ var LISTS_GET_INDEX_INPUT_IN_LIST: string; /** @type {string} */ var LISTS_INDEX_FROM_START_TOOLTIP: string; /** @type {string} */ var LISTS_INDEX_FROM_END_TOOLTIP: string; /** @type {string} */ var LISTS_GET_INDEX_TOOLTIP_GET_FROM: string; /** @type {string} */ var LISTS_GET_INDEX_TOOLTIP_GET_FIRST: string; /** @type {string} */ var LISTS_GET_INDEX_TOOLTIP_GET_LAST: string; /** @type {string} */ var LISTS_GET_INDEX_TOOLTIP_GET_RANDOM: string; /** @type {string} */ var LISTS_GET_INDEX_TOOLTIP_GET_REMOVE_FROM: string; /** @type {string} */ var LISTS_GET_INDEX_TOOLTIP_GET_REMOVE_FIRST: string; /** @type {string} */ var LISTS_GET_INDEX_TOOLTIP_GET_REMOVE_LAST: string; /** @type {string} */ var LISTS_GET_INDEX_TOOLTIP_GET_REMOVE_RANDOM: string; /** @type {string} */ var LISTS_GET_INDEX_TOOLTIP_REMOVE_FROM: string; /** @type {string} */ var LISTS_GET_INDEX_TOOLTIP_REMOVE_FIRST: string; /** @type {string} */ var LISTS_GET_INDEX_TOOLTIP_REMOVE_LAST: string; /** @type {string} */ var LISTS_GET_INDEX_TOOLTIP_REMOVE_RANDOM: string; /** @type {string} */ var LISTS_SET_INDEX_HELPURL: string; /** @type {string} */ var LISTS_SET_INDEX_INPUT_IN_LIST: string; /** @type {string} */ var LISTS_SET_INDEX_SET: string; /** @type {string} */ var LISTS_SET_INDEX_INSERT: string; /** @type {string} */ var LISTS_SET_INDEX_INPUT_TO: string; /** @type {string} */ var LISTS_SET_INDEX_TOOLTIP_SET_FROM: string; /** @type {string} */ var LISTS_SET_INDEX_TOOLTIP_SET_FIRST: string; /** @type {string} 
*/ var LISTS_SET_INDEX_TOOLTIP_SET_LAST: string; /** @type {string} */ var LISTS_SET_INDEX_TOOLTIP_SET_RANDOM: string; /** @type {string} */ var LISTS_SET_INDEX_TOOLTIP_INSERT_FROM: string; /** @type {string} */ var LISTS_SET_INDEX_TOOLTIP_INSERT_FIRST: string; /** @type {string} */ var LISTS_SET_INDEX_TOOLTIP_INSERT_LAST: string; /** @type {string} */ var LISTS_SET_INDEX_TOOLTIP_INSERT_RANDOM: string; /** @type {string} */ var LISTS_GET_SUBLIST_HELPURL: string; /** @type {string} */ var LISTS_GET_SUBLIST_INPUT_IN_LIST: string; /** @type {string} */ var LISTS_GET_SUBLIST_START_FROM_START: string; /** @type {string} */ var LISTS_GET_SUBLIST_START_FROM_END: string; /** @type {string} */ var LISTS_GET_SUBLIST_START_FIRST: string; /** @type {string} */ var LISTS_GET_SUBLIST_END_FROM_START: string; /** @type {string} */ var LISTS_GET_SUBLIST_END_FROM_END: string; /** @type {string} */ var LISTS_GET_SUBLIST_END_LAST: string; /** @type {string} */ var LISTS_GET_SUBLIST_TAIL: string; /** @type {string} */ var LISTS_GET_SUBLIST_TOOLTIP: string; /** @type {string} */ var LISTS_SORT_HELPURL: string; /** @type {string} */ var LISTS_SORT_TITLE: string; /** @type {string} */ var LISTS_SORT_TOOLTIP: string; /** @type {string} */ var LISTS_SORT_ORDER_ASCENDING: string; /** @type {string} */ var LISTS_SORT_ORDER_DESCENDING: string; /** @type {string} */ var LISTS_SORT_TYPE_NUMERIC: string; /** @type {string} */ var LISTS_SORT_TYPE_TEXT: string; /** @type {string} */ var LISTS_SORT_TYPE_IGNORECASE: string; /** @type {string} */ var LISTS_SPLIT_HELPURL: string; /** @type {string} */ var LISTS_SPLIT_LIST_FROM_TEXT: string; /** @type {string} */ var LISTS_SPLIT_TEXT_FROM_LIST: string; /** @type {string} */ var LISTS_SPLIT_WITH_DELIMITER: string; /** @type {string} */ var LISTS_SPLIT_TOOLTIP_SPLIT: string; /** @type {string} */ var LISTS_SPLIT_TOOLTIP_JOIN: string; /** @type {string} */ var LISTS_REVERSE_HELPURL: string; /** @type {string} */ var LISTS_REVERSE_MESSAGE0: string; /** @type {string} */ var LISTS_REVERSE_TOOLTIP: string; /** @type {string} */ var ORDINAL_NUMBER_SUFFIX: string; /** @type {string} */ var VARIABLES_GET_HELPURL: string; /** @type {string} */ var VARIABLES_GET_TOOLTIP: string; /** @type {string} */ var VARIABLES_GET_CREATE_SET: string; /** @type {string} */ var VARIABLES_SET_HELPURL: string; /** @type {string} */ var VARIABLES_SET: string; /** @type {string} */ var VARIABLES_SET_TOOLTIP: string; /** @type {string} */ var VARIABLES_SET_CREATE_GET: string; /** @type {string} */ var PROCEDURES_DEFNORETURN_HELPURL: string; /** @type {string} */ var PROCEDURES_DEFNORETURN_TITLE: string; /** @type {string} */ var PROCEDURES_DEFNORETURN_PROCEDURE: string; /** @type {string} */ var PROCEDURES_BEFORE_PARAMS: string; /** @type {string} */ var PROCEDURES_CALL_BEFORE_PARAMS: string; /** @type {string} */ var PROCEDURES_DEFNORETURN_DO: string; /** @type {string} */ var PROCEDURES_DEFNORETURN_TOOLTIP: string; /** @type {string} */ var PROCEDURES_DEFNORETURN_COMMENT: string; /** @type {string} */ var PROCEDURES_DEFRETURN_HELPURL: string; /** @type {string} */ var PROCEDURES_DEFRETURN_TITLE: string; /** @type {string} */ var PROCEDURES_DEFRETURN_PROCEDURE: string; /** @type {string} */ var PROCEDURES_DEFRETURN_DO: string; /** @type {string} */ var PROCEDURES_DEFRETURN_COMMENT: string; /** @type {string} */ var PROCEDURES_DEFRETURN_RETURN: string; /** @type {string} */ var PROCEDURES_DEFRETURN_TOOLTIP: string; /** @type {string} */ var PROCEDURES_ALLOW_STATEMENTS: string; /** @type {string} */ var 
PROCEDURES_DEF_DUPLICATE_WARNING: string; /** @type {string} */ var PROCEDURES_CALLNORETURN_HELPURL: string; /** @type {string} */ var PROCEDURES_CALLNORETURN_TOOLTIP: string; /** @type {string} */ var PROCEDURES_CALLRETURN_HELPURL: string; /** @type {string} */ var PROCEDURES_CALLRETURN_TOOLTIP: string; /** @type {string} */ var PROCEDURES_MUTATORCONTAINER_TITLE: string; /** @type {string} */ var PROCEDURES_MUTATORCONTAINER_TOOLTIP: string; /** @type {string} */ var PROCEDURES_MUTATORARG_TITLE: string; /** @type {string} */ var PROCEDURES_MUTATORARG_TOOLTIP: string; /** @type {string} */ var PROCEDURES_HIGHLIGHT_DEF: string; /** @type {string} */ var PROCEDURES_CREATE_DO: string; /** @type {string} */ var PROCEDURES_IFRETURN_TOOLTIP: string; /** @type {string} */ var PROCEDURES_IFRETURN_HELPURL: string; /** @type {string} */ var PROCEDURES_IFRETURN_WARNING: string; /** @type {string} */ var WORKSPACE_COMMENT_DEFAULT_TEXT: string; /** @type {string} */ var COLLAPSED_WARNINGS_WARNING: string; }
* Override this method if the field's html input representation is different * than the field's value. This should be coupled with an override of
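// Illustrative sketch, not from the original typings: one way the type-check
// helpers declared earlier in this file might be used during rendering. It
// assumes, as in upstream Blockly, that the helpers live on
// Blockly.blockRendering.Types and that a rendered Row exposes an `elements`
// array of Measurable objects; treat both names as assumptions.
function countFieldsInRow(row: Blockly.blockRendering.Row): number {
  let fieldCount = 0;
  for (const elem of row.elements) {
    // isField returns a non-zero flag when the measurable describes a field.
    if (Blockly.blockRendering.Types.isField(elem)) {
      fieldCount++;
    }
  }
  return fieldCount;
}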
cart_logparse.py
#!/usr/bin/env python3 # Copyright (C) 2018-2019 Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted for any purpose (including commercial purposes) # provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions, and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions, and the following disclaimer in the # documentation and/or materials provided with the distribution. # # 3. In addition, redistributions of modified forms of the source or binary # code must carry prominent notices stating that the original code was # changed and the date of the change. # # 4. All publications or advertising materials mentioning features or use of # this software are asked, but not required, to acknowledge that it was # developed by Intel Corporation and credit the contributors. # # 5. Neither the name of Intel Corporation, nor the name of any Contributor # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ LogIter class definition. LogLine class definition. This provides a way of querying CaRT logfiles for processing. """ import os import re class InvalidPid(Exception): """Exception to be raised when invalid pid is requested""" pass class InvalidLogFile(Exception): """Exception to be raised when log file cannot be parsed""" pass LOG_LEVELS = {'FATAL' :1, 'EMRG' :2, 'CRIT' :3, 'ERR' :4, 'WARN' :5, 'NOTE' :6, 'INFO' :7, 'DBUG' :8} # pylint: disable=too-few-public-methods class LogRaw(): """Class for raw (non cart log lines) in cart log files This is used for lines that cannot be identified as cart log lines, for example mercury logs being sent to the same file. """ def __init__(self, line): self.line = line.rstrip('\n') self.trace = False def to_str(self): """Convert the object to a string, in a way that is compatible with LogLine """ return self.line # pylint: disable=too-many-instance-attributes class LogLine(): """Class for parsing CaRT log lines This class implements a way of inspecting individual lines of a log file. It allows for queries such as 'string in line' which will match against the message only, and != which will match the entire line. index is the line in the file, starting at 1. """ def __init__(self, line, index): fields = line.split() # Work out the end of the fixed-width portion, and the beginning of the # message. 
The hostname and pid fields are both variable width idx = 29 + len(fields[1]) + len(fields[2]) pidtid = fields[2][5:-1] pid = pidtid.split("/") self.pid = int(pid[0]) self._preamble = line[:idx] self.index = index self.mask = fields[3] try: self.level = LOG_LEVELS[fields[4]] except KeyError: raise InvalidLogFile(fields[4]) self._fields = fields[5:] if self._fields[1][-2:] == '()': self.trace = False self.function = self._fields[1][:-2] elif self._fields[1][-1:] == ')': self.trace = True else: self.trace = False if self.trace: if self.level == 7 or self.level == 3: if self.mask == 'rpc' or self.mask == 'hg': del self._fields[2:5] if self.trace: fn_str = self._fields[1] start_idx = fn_str.find('(') self.function = fn_str[:start_idx] desc = fn_str[start_idx+1:-1] if desc == '(nil)': self.descriptor = '' else: self.descriptor = desc self._msg = ' '.join(self._fields) def to_str(self, mark=False): """Convert the object to a string""" # pre = self._preamble.split(' ', maxsplit=3) pre = self._preamble.split(' ', 3) preamble = ' '.join([pre[0], pre[3]]) if mark: return '{} ** {}'.format(preamble, self._msg) return '{} {}'.format(preamble, self._msg) def __getattr__(self, attr): if attr == 'parent': if self._fields[2] == 'Registered': # This is a bit of a hack but handle the case where descriptor # names contain spaces. if self._fields[6] == 'from': return self._fields[7] return self._fields[6] if self._fields[2] == 'Link': return self._fields[5] if attr == 'filename': try: (filename, _) = self._fields[0].split(':') return filename except ValueError: pass elif attr == 'lineno': try: (_, lineno) = self._fields[0].split(':') return int(lineno) except ValueError: pass raise AttributeError def get_msg(self): """Return the message part of a line, stripping up to and including the filename""" return ' '.join(self._fields[1:]) def get_anon_msg(self): """Return the message part of a line, stripping up to and including the filename but removing pointers""" # As get_msg, but try and remove specific information from the message, # This is so that large volumes of logs can be amalgamated and reduced # a common set for easier reporting. Specifically the trace pointer, # fid/revision of GAH values and other pointers are removed. # # These can then be fed back as source-level comments to the source-code # without creating too much output. fields = [] for entry in self._fields[2:]: field = None if entry.startswith('0x') and len(entry) > 5: if entry.endswith(')'): field = '0x...)' else: field = '0x...' if not field: r = re.search("^[0-9,a-f]{8}$", entry) if r: field = 'uuid' if not field: r = re.search("^[0-9,a-f]{8}\[\d+\]\:$", entry) if r: field = 'uuid/rank' if not field: r = re.search("^\d+.\d+.\d+\:*$", entry) if r: field = 'low/high/shard' if field: fields.append(field) else: fields.append(entry) return '{}() {}'.format(self.function, ' '.join(fields)) def endswith(self, item): """Mimic the str.endswith() function This only matches on the actual string part of the message, not the timestamp/pid/faculty parts. """ return self._msg.endswith(item) def get_field(self, idx): """Return a specific field from the line""" return self._fields[idx] def _is_type(self, text, trace=True): """Checks for text in a log message Retuns True if the line starts with the text provided """ if trace and not self.trace: return False # Check that the contents of two arrays are equal, using text as is and # selecting only the correct entries of the fields array. 
return text == self._fields[2:2+len(text)] def is_new(self): """Returns True if line is a new descriptor""" return self._is_type(['Registered', 'new']) def
(self): """Returns true if line is descriptor deregister""" return self._is_type(['Deregistered']) def is_new_rpc(self): """Returns True if line is new rpc""" if not self.trace: return False if self._fields[-1] == 'allocated.': return True if self._fields[-1] == 'received.' and self._fields[-5] == 'allocated': return True return False def is_dereg_rpc(self): """Returns true if line is a rpc deregister""" if not self.trace: return False if self.function != 'crt_hg_req_destroy': return False return self._fields[-1] == 'destroying' def is_callback(self): """Returns true if line is RPC callback""" # TODO: This is broken for now but the RPCtrace has not been ported yet # so there are no current users of it. return self._is_type(['Invoking', 'RPC', 'callback']) def is_link(self): """Returns True if line is Link descriptor""" return self._is_type(['Link']) def is_fi_site(self): return self._is_type(['fault_id'], trace=False) def is_fi_alloc_fail(self): return self._is_type(['out', 'of', 'memory'], trace=False) def is_calloc(self): """Returns True if line is a allocation point""" return self.get_field(2).startswith('alloc(') def is_realloc(self): """Returns True if line is a call to""" return self.get_field(2) == 'realloc' def calloc_size(self): """Returns the size of the allocation""" if self.get_field(5) == '*': if self.is_realloc(): field = -5 else: field = -3 count = int(self.get_field(field).split(':')[-1]) return count * int(self.get_field(4)) return int(self.get_field(4)) def is_free(self): """Returns True if line is a call to free""" return self.get_field(2) == 'free' # pylint: disable=too-many-branches class StateIter(): """Helper class for LogIter to add a statefull iterator. Implement a new iterator() for LogIter() that tracks descriptors and adds two new attributes, pdesc and pparent which are the local descriptor with the reuse-count appended. """ def __init__(self, li): self.reuse_table = {} self.active_desc = {} self.li = li self._l = None def __iter__(self): # Dict, indexed by pointer, containing re-use index for that pointer. self.reuse_table = {} # Conversion from active pointer to line where it was created. 
self.active_desc = {} self._l = iter(self.li) return self def __next__(self): line = next(self._l) if not line.trace: line.rpc = False return line if line.is_new() or line.is_new_rpc(): if line.descriptor in self.reuse_table: self.reuse_table[line.descriptor] += 1 line.pdesc = '{}_{}'.format(line.descriptor, self.reuse_table[line.descriptor]) else: self.reuse_table[line.descriptor] = 0 line.pdesc = line.descriptor self.active_desc[line.descriptor] = line if line.is_new(): if line.parent in self.active_desc: line.pparent = self.active_desc[line.parent].pdesc else: line.pparent = line.parent line.rpc = False else: line.rpc = True elif line.is_link(): if line.parent in self.active_desc: line.pparent = self.active_desc[line.parent].pdesc else: line.pparent = line.parent line.pdesc = line.descriptor line.rpc = False else: if line.descriptor in self.active_desc: line.rpc = self.active_desc[line.descriptor].rpc if not line.rpc: line.pparent = self.active_desc[line.descriptor].pparent line.pdesc = self.active_desc[line.descriptor].pdesc line.rpc_opcode = self.active_desc[line.descriptor].get_field(3) else: line.pdesc = line.descriptor line.rpc = False if (line.is_dereg() or line.is_dereg_rpc()) and \ line.descriptor in self.active_desc: del self.active_desc[line.descriptor] return line def next(self): """Python2/3 compat function""" return self.__next__() # pylint: disable=too-many-branches # pylint: disable=too-few-public-methods class LogIter(): """Class for parsing CaRT log files This class implements a iterator for lines in a cart log file. The iterator is rewindable, and there are options for automatically skipping lines. """ def __init__(self, fname): """Load a file, and check how many processes have written to it""" # Depending on file size either pre-read entire file into memory, # or do a first pass checking the pid list. This allows the same # iterator to work fast if the file can be kept in memory, or the # same, bug slower if it needs to be re-read each time. # # Try and open the file as utf-8, but if that doesn't work then # find and report the error, then continue with the file open as # latin-1 self._fd = None try: self._fd = open(fname, 'r', encoding='utf-8') self._fd.read() except UnicodeDecodeError as err: print('ERROR: Invalid data in server.log on following line') self._fd = open(fname, 'r', encoding='latin-1') self._fd.read(err.start - 200) data = self._fd.read(199) lines = data.splitlines() print(lines[-1]) self._fd.seek(0) self.fname = fname self._data = [] index = 0 pids = set() i = os.fstat(self._fd.fileno()) self.__from_file = bool(i.st_size > (1024*1024*20)) self.__index = 0 for line in self._fd: # fields = line.split(maxsplit=8) fields = line.split(' ', 8) index += 1 if self.__from_file: if len(fields) < 6 or len(fields[0]) != 17: continue l_obj = LogLine(line, index) pids.add(l_obj.pid) else: if len(fields) < 6 or len(fields[0]) != 17: self._data.append(LogRaw(line)) else: l_obj = LogLine(line, index) pids.add(l_obj.pid) self._data.append(l_obj) # Offset into the file when iterating. This is an array index, and is # based from zero, as opposed to line index which is based from 1. 
self._offset = 0 self._pid = None self._trace_only = False self._raw = False self._pids = sorted(pids) def __del__(self): if self._fd: self._fd.close() def new_iter(self, pid=None, stateful=False, trace_only=False, raw=False): """Rewind file iterator, and set options If pid is set then the iterator will only return lines matching the pid. If trace_only is True then the iterator will only return trace lines. If raw is set then all lines in the file are returned, even non-log lines. """ if pid is not None: if pid not in self._pids: raise InvalidPid self._pid = pid else: self._pid = None self._trace_only = trace_only self._raw = raw if stateful: if not pid: raise InvalidPid return StateIter(self) return self def __iter__(self, pid=None): if self.__from_file: self._fd.seek(0) self.__index = 0 else: self._offset = 0 return self def __lnext(self): """Helper function for __next__""" if self.__from_file: line = self._fd.readline() if not line: raise StopIteration self.__index += 1 # fields = line.split(maxsplit=8) fields = line.split(' ', 8) if len(fields) < 6 or len(fields[0]) != 17: return LogRaw(line) return LogLine(line, self.__index) try: line = self._data[self._offset] except IndexError: raise StopIteration self._offset += 1 return line def __next__(self): while True: line = self.__lnext() if not self._raw and isinstance(line, LogRaw): continue if self._trace_only and not line.trace: continue if isinstance(line, LogRaw) and self._pid: continue if self._pid and line.pid != self._pid: continue return line def next(self): """Python2/3 compat function""" return self.__next__() def get_pids(self): """Return an array of pids appearing in the file""" return self._pids # pylint: enable=too-many-instance-attributes
is_dereg
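# Hedged usage sketch for the classes above (not part of cart_logparse itself):
# open a CaRT log, pick one pid, and print every trace line at WARN severity or
# worse. 'server.log' is a placeholder path.
from cart_logparse import LogIter, LOG_LEVELS

log = LogIter('server.log')
pids = log.get_pids()
if pids:
    for line in log.new_iter(pid=pids[0], trace_only=True):
        # Lower numeric levels are more severe (FATAL=1 ... DBUG=8).
        if line.level <= LOG_LEVELS['WARN']:
            print(line.to_str(mark=True))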
guild_integrations_update.py
# Copyright Pincer 2021-Present # Full MIT License can be found in `LICENSE` at the project root. """Sent when a guild integration is updated.""" from ..core.dispatch import GatewayDispatch from ..objects.events.guild import GuildIntegrationsUpdateEvent from ..utils import Coro from ..utils.conversion import construct_client_dict async def guild_integrations_update_middleware(self, payload: GatewayDispatch): """|coro| Middleware for ``on_guild_integrations_update`` event. Parameters ---------- payload : :class:`~pincer.core.dispatch.GatewayDispatch` The data received from the guild integrations update event. Returns ------- Tuple[:class:`str`, :class:`~pincer.objects.events.guild.GuildIntegrationsUpdateEvent`] ``on_guild_integrations_update`` and a ``GuildIntegrationsUpdateEvent`` """ # noqa: E501 return ( "on_guild_integrations_update", GuildIntegrationsUpdateEvent.from_dict( construct_client_dict(self, payload.data) ) ) def
() -> Coro: return guild_integrations_update_middleware
export
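# Hedged sketch of the consumer side (not from the source): a handler named after
# the event string returned by the middleware above. The @Client.event decorator
# follows Pincer's documented registration pattern, and event.guild_id is assumed
# from the Discord GUILD_INTEGRATIONS_UPDATE payload.
from pincer import Client

class Bot(Client):
    @Client.event
    async def on_guild_integrations_update(self, event):
        print(f"Integrations updated in guild {event.guild_id}")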
Label.types.ts
import * as React from 'react'; /* tslint:enable:no-unused-variable */ import { ITheme, IStyle } from '@uifabric/styling'; import { IRefObject, IComponentAs } from '@uifabric/utilities'; import { IStyleFunctionOrObject } from '@uifabric/merge-styles'; /** * {@docCategory Label} */ export interface ILabel {} /** * {@docCategory Label} */ export interface ILabelProps extends React.LabelHTMLAttributes<HTMLLabelElement> { /** * Render the root element as another type. */ as?: IComponentAs<React.AllHTMLAttributes<HTMLElement>>; /** * Optional callback to access the ILabel interface. Use this instead of ref for accessing * the public methods and properties of the component. */ componentRef?: IRefObject<ILabel>; /** * Whether the associated form field is required or not * @defaultvalue false */ required?: boolean; /** * Renders the label as disabled. */ disabled?: boolean; /** * Theme provided by HOC. */ theme?: ITheme; /** * Styles for the label. */ styles?: IStyleFunctionOrObject<ILabelStyleProps, ILabelStyles>; } /** * {@docCategory Label} */ export interface ILabelStyles { /** * Styles for the root element. */ root: IStyle; } /** * {@docCategory Label} */ export interface ILabelStyleProps { /** * */ theme: ITheme;
className?: string; disabled?: boolean; required?: boolean; }
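// Minimal sketch (not part of the original file): a props object that satisfies
// ILabelProps as declared above. The styles function maps ILabelStyleProps to
// ILabelStyles; the concrete style values are illustrative only.
const exampleLabelProps: ILabelProps = {
  required: true,
  disabled: false,
  styles: (props: ILabelStyleProps): ILabelStyles => ({
    root: { fontWeight: props.required ? 'bold' : 'normal' },
  }),
};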
cloudSync.js
import { isEqual } from 'lodash/core'; import { isEmpty } from 'lodash'; import { sendMessageToAllTabs } from '../utils/messaging'; const axios = require('axios'); const syncStatus = { syncing: false, syncFailed: false, syncingBlockedBySyncing: false, }; const cloudSync = async function(serverUrl, skipEqualityCheck) { console.log( ' asyncCloudSync called, syncing status, serverUrl, skipEqualityCheck', syncStatus.syncing, serverUrl, skipEqualityCheck, timestamp() ); // console.log('window.location.href', window.location.href); if (syncStatus.syncing) { console.log(' syncing blocked by concurrent sync', timestamp()); syncStatus.syncingBlockedBySyncing = true; return null; } else syncStatus.syncingBlockedBySyncing = false; syncStatus.syncing = true; sendOutMessage({ syncing: true, value: true }); try { const storage = await getStorage(); // console.log(' getStorage results', storage); const jwt = storage.jwt; const lastSyncsUserCollection = storage.lastSyncsUserCollection; const lastSyncsWebsites = storage.lastSyncsWebsites; let localUserCollection = storage.localUserCollection; let localWebsites = storage.localWebsites; if (isEmpty(localUserCollection)) localUserCollection = {}; const initialUserCollection = JSON.parse(JSON.stringify(localUserCollection)); if (isEmpty(localWebsites)) localWebsites = {}; const initialWebsites = JSON.parse(JSON.stringify(localWebsites)); const uploadFailedCardsPut = storage.uploadFailedCardsPut; const uploadFailedCardsPost = storage.uploadFailedCardsPost; const uploadFailedDecksPost = storage.uploadFailedDecksPost; await uploadFailedItems( jwt, serverUrl, uploadFailedDecksPost, uploadFailedCardsPut, uploadFailedCardsPost ); if (!skipEqualityCheck) { const equal = await checkEquality( lastSyncsWebsites, lastSyncsUserCollection, localWebsites, localUserCollection ); if (equal) { syncStatus.syncing = false; sendOutMessage({ syncing: true, value: false }); sendOutMessage({ syncNotUpToDate: true, value: false });
return null; } } const serverCollection = await getMetaData(jwt, serverUrl); // console.log(' serverCollection', serverCollection); localUserCollection = await syncHighlightUrls( jwt, serverUrl, localWebsites, localUserCollection, serverCollection, initialUserCollection ); localUserCollection = await syncUserCollection( jwt, serverUrl, localUserCollection, serverCollection, initialUserCollection ); const serverWebsites = await getWebsitesMeta(jwt, serverUrl); const comparison = await compareLocalAndServerWebsites( localWebsites, serverWebsites, localUserCollection.user_id ); const localNewer = comparison.localNewer; const serverNewer = comparison.serverNewer; localWebsites = comparison.localWebsites; localWebsites = await removeDeletedFromLocal(localWebsites); const getWebsitesSelectedContentResults = await getNewerFromServer(jwt, serverUrl, serverNewer); const saved = await saveNewerToLocal( getWebsitesSelectedContentResults, localWebsites, localUserCollection, initialWebsites ); if (!saved) { console.log(' collection changed while syncing, abort and restart sync', timestamp()); syncStatus.syncing = false; cloudSync(serverUrl, true); return null; } await uploadNewerToServer(jwt, serverUrl, localNewer); console.log( ' sync complete, was syncingBlockedBySyncing?', syncStatus.syncingBlockedBySyncing, timestamp() ); if (syncStatus.syncingBlockedBySyncing) { syncStatus.syncing = false; cloudSync(serverUrl); return null; } else { // success! syncStatus.syncing = false; sendOutMessage({ syncing: true, value: false }); sendOutMessage({ syncNotUpToDate: true, value: false }); } } catch (error) { console.log(' sync error', error, timestamp()); syncStatus.syncing = false; sendOutMessage({ syncing: true, value: false }); sendOutMessage({ syncNotUpToDate: true, value: true }); return null; } }; function sendOutMessage(msg) { // because the popup also needs to hear the message, so sendMessageToAllTabs won't reach it sendMessageToAllTabs(msg); chrome.runtime.sendMessage(msg); } function timestamp() { const now = new Date(); return `${now.getMinutes()}:${now.getSeconds()}:${now.getMilliseconds()}`; } // during sync, should we send to the highlighter delete the cards, or just do it in the sync function? // function sendOutDeleteCard(cardId, url) { // chrome.storage.local.get(['websites'], function(items) { // const websites = items.websites; // let card; // for (const cardToCheck of websites[url].cards) { // if (cardToCheck.card_id === cardId) card = cardToCheck; // break; // } // sendMessageToAllTabs({ // deleteCard: true, // card: card, // url: url, // }); // }); // } async function callAPI(data) { let result = null; const options = { url: data.url, headers: { 'content-type': 'application/json', 'x-access-token': data.jwt, }, method: data.method, }; if (data.data) { options.data = data.data; } console.log('sending axios, options', options); await axios(options) .then(response => { result = response.data; console.log(result); }) .catch(function(err) { console.log(err, timestamp()); sendOutMessage({ syncing: true, value: false }); sendOutMessage({ syncNotUpToDate: true, value: true }); throw new Error(err); }); return result; } function getStorage() { // using chrome.storage inside an async, wrap it in a promise (remember to add 'new' before promise!) 
// https://stackoverflow.com/questions/59440008/how-to-wait-for-asynchronous-chrome-storage-local-get-to-finish-before-continu return new Promise((resolve, reject) => { chrome.storage.local.get( [ 'user_collection', 'lastSyncsUserCollection', 'lastSyncsWebsites', 'websites', 'decks_meta', 'jwt', 'uploadFailedCards', 'uploadFailedDecks', ], function(items) { // console.log(' items', items); const returnData = {}; returnData.jwt = items.jwt; isEmpty(items.websites) ? (returnData.localWebsites = {}) : (returnData.localWebsites = items.websites); returnData.localUserCollection = items.user_collection; // because of strange firefox bug where user_collection wasn't getting set properly if (isEmpty(items.user_collection) && !isEmpty(items.lastSyncsUserCollection)) returnData.localUserCollection = items.lastSyncsUserCollection; returnData.lastSyncsUserCollection = items.lastSyncsUserCollection; returnData.lastSyncsWebsites = items.lastSyncsWebsites; returnData.uploadFailedCards = items.uploadFailedCards; returnData.uploadFailedDecks = items.uploadFailedDecks; if (items.jwt !== undefined) { resolve(returnData); } else { reject(new Error('Unable to retrieve local storage')); } } ); }); } async function checkEquality( lastSyncsWebsites, lastSyncsUserCollection, localWebsites, localUserCollection ) { if (lastSyncsWebsites && lastSyncsUserCollection) { // console.log(' lastSyncsWebsites, localWebsites', lastSyncsWebsites, localWebsites); // console.log( // 'lastSyncsUserCollection.highlight_urls, localUserCollection.highlight_urls', // lastSyncsUserCollection.highlight_urls, // localUserCollection.highlight_urls // ); if ( isEqual(lastSyncsWebsites, localWebsites) && isEqual(lastSyncsUserCollection.highlight_urls, localUserCollection.highlight_urls) ) { console.log(' last sync equal', timestamp()); return true; } } console.log(' last sync unequal', timestamp()); return false; } async function uploadFailedItems( jwt, serverUrl, uploadFailedDecksPost, uploadFailedCardsPut, uploadFailedCardsPost ) { if (!isEmpty(uploadFailedDecksPost)) { for (const entry of uploadFailedCardsPost) { sendOutMessage({ postDeck: true, jwt: jwt, serverUrl: serverUrl, card: entry.card, deck: entry.deck, }); } } if (!isEmpty(uploadFailedCardsPut)) { for (const entry of uploadFailedCardsPost) { sendOutMessage({ putCard: true, jwt: jwt, serverUrl: serverUrl, card: entry.card, deckId: entry.deck_id, }); } } if (!isEmpty(uploadFailedCardsPost)) { for (const entry of uploadFailedCardsPost) { sendOutMessage({ postCard: true, jwt: jwt, serverUrl: serverUrl, card: entry.card, deckId: entry.deck_id, deckTitle: entry.title, }); } } } async function getMetaData(jwt, serverUrl) { const getMetaDataCall = { url: serverUrl + '/get_decks_meta_and_collection', jwt: jwt, method: 'GET', }; let metaDataCallResults = null; await callAPI(getMetaDataCall).then(data => { metaDataCallResults = data; }); console.log(' Get Meta and Collection results ', metaDataCallResults); if (!metaDataCallResults) { throw new Error('error in get_decks_meta_and_collection'); } chrome.storage.local.set({ decks_meta: metaDataCallResults.decks_meta }); return metaDataCallResults.user_collection; } async function syncHighlightUrls( jwt, serverUrl, localWebsites, localUserCollection, serverCollection, initialUserCollection ) { // console.log( // 'sync highlight urls', // jwt, // serverUrl, // localWebsites, // localUserCollection, // serverCollection, // initialUserCollection // ); // sync highlight_urls // This still isn't perfect, what if local or server added urls but 
didnt sync let postHighlightUrls = false; // make sure localHighlights_Urls is accurate const lHighlightUrls = localUserCollection.highlight_urls; const sHighlightUrls = serverCollection.highlight_urls; let highlightUrlsHighlightUrlsListCheck = localUserCollection.highlight_urls.list; if (isEmpty(localWebsites)) highlightUrlsHighlightUrlsListCheck = []; else { highlightUrlsHighlightUrlsListCheck = []; for (const url in localWebsites) { let hasHighlights; let hasCards; let hasDeleted; if (!isEmpty(localWebsites[url].highlights)) hasHighlights = true; if (!isEmpty(localWebsites[url].cards)) hasCards = true; if (!isEmpty(localWebsites[url].deleted)) hasDeleted = true; if (hasHighlights || hasCards || hasDeleted) highlightUrlsHighlightUrlsListCheck.push(url); } } if (!isEqual(highlightUrlsHighlightUrlsListCheck, lHighlightUrls.list)) { localUserCollection.highlight_urls.list = highlightUrlsHighlightUrlsListCheck; lHighlightUrls.list = highlightUrlsHighlightUrlsListCheck; } if (!sHighlightUrls || sHighlightUrls.list.length === 0) { if (lHighlightUrls.list.length > 0) postHighlightUrls = true; } else { // console.log('sHighlightUrls, lHighlightUrls', sHighlightUrls, lHighlightUrls); if (!isEqual(sHighlightUrls, lHighlightUrls)) { if ( sHighlightUrls.edited > lHighlightUrls.edited || (sHighlightUrls.edited === lHighlightUrls.edited && sHighlightUrls.list.length > lHighlightUrls.list.length) ) { chrome.storage.local.get(['user_collection'], function(items) { if ( !isEqual(initialUserCollection.highlight_urls, items.user_collection.highlight_urls) ) { console.log( ' collection changed while syncing, abort and restart sync', timestamp() ); syncStatus.syncing = false; cloudSync(serverUrl, true); return null; } else { localUserCollection.highlight_urls = serverCollection.highlight_urls; chrome.storage.local.set({ user_collection: localUserCollection, }); } }); } else { postHighlightUrls = true; } } } // console.log(' postHighlightUrls', postHighlightUrls); // console.log(' localUserCollection.highlight_urls', localUserCollection.highlight_urls); if (postHighlightUrls) { // console.log(' posting highlight_urls', localUserCollection.highlight_urls); const putSettingsData = { url: serverUrl + '/put_user_collection', jwt: jwt, method: 'PUT', data: { highlight_urls: localUserCollection.highlight_urls, }, }; let putSettingsResult = null; await callAPI(putSettingsData).then(data => { putSettingsResult = data; }); console.log(' Put highlight_urls changes', putSettingsResult, timestamp()); if (!putSettingsResult) { throw new Error('error in highlight_urls'); } } return localUserCollection; } async function syncUserCollection( jwt, serverUrl, localUserCollection, serverCollection, initialUserCollection ) { // console.log('localUserCollection, serverCollection', localUserCollection, serverCollection); // sync settings, schedule, all_card_tags, //later extension settings. 
any one with 'edited' for (const section in localUserCollection) { if (section === 'user_id') { if (isEmpty(localUserCollection.user_id) && !isEmpty(serverCollection.user_id)) { localUserCollection.user_id = serverCollection.user_id; chrome.storage.local.set({ user_collection: localUserCollection, }); } } if (section === 'webapp_settings' || section === 'schedule' || section === 'all_card_tags') { if (isEmpty(serverCollection[section])) serverCollection[section] = { edited: 0, }; if (isEmpty(localUserCollection[section])) localUserCollection[section] = { edited: 0, }; if (!isEqual(serverCollection[section], localUserCollection[section])) { console.log( 'serverCollection[section], userCollection[section]', serverCollection[section], localUserCollection[section] ); if (serverCollection[section].edited > localUserCollection[section].edited) { chrome.storage.local.get(['user_collection'], function(items) { if ( !isEqual(initialUserCollection[section], items.user_collection[section]) || !isEqual(initialUserCollection[section], items.user_collection[section]) ) { console.log( ' collection changed while syncing, abort and restart sync', timestamp() ); syncStatus.syncing = false; cloudSync(serverUrl, true); return null; } else { localUserCollection[section] = serverCollection[section]; chrome.storage.local.set({ user_collection: localUserCollection, }); } }); } else if (serverCollection[section].edited < localUserCollection[section].edited) { console.log('putting section: ', section); const putSectionData = { url: serverUrl + '/put_user_collection', jwt: jwt, method: 'PUT', data: { [section]: localUserCollection[section], }, }; let putSectionResult = null; await callAPI(putSectionData).then(data => { putSectionResult = data; }); console.log(' PUT section results', putSectionResult); } } } } return localUserCollection; } async function getWebsitesMeta(jwt, serverUrl) { const getWebsitesMetaCall = { url: serverUrl + '/get_websites_meta', jwt: jwt, method: 'GET', }; let websitesMetaResults = null; await callAPI(getWebsitesMetaCall).then(data => { websitesMetaResults = data; }); console.log(' websitesMetaResults ', websitesMetaResults, timestamp()); if (!websitesMetaResults) { throw new Error('error in get_websites_selected_content'); } return websitesMetaResults.websites_meta; } async function compareLocalAndServerWebsites(localWebsites, serverWebsites, userId) { let localNewer = {}; let serverNewer = {}; console.log( ' compareLocalAndServerWebsites: localWebsites, serverWebsites', localWebsites, serverWebsites, timestamp() ); // for cards, highlighted, and deleted, check if it only exists in one, if entry exists in both, compare edited date if (isEmpty(serverWebsites)) localNewer = localWebsites; else { if (isEmpty(localWebsites)) serverNewer = serverWebsites; else { for (const url in serverWebsites) { localNewer[url] = { cards: [], highlights: {}, deleted: [], }; serverNewer[url] = { cards: [], highlights: {}, deleted: [], }; if (!Object.keys(localWebsites).includes(url)) serverNewer[url] = serverWebsites[url]; for (const lUrl in localWebsites) { if ( !Object.keys(serverWebsites).includes(lUrl) && !Object.keys(localNewer).includes(lUrl) ) localNewer[lUrl] = localWebsites[lUrl]; else { if (!Object.keys(localNewer).includes(lUrl)) localNewer[lUrl] = { cards: [], highlights: {}, deleted: [], }; if (!Object.keys(serverNewer).includes(lUrl)) serverNewer[lUrl] = { cards: [], highlights: {}, deleted: [], }; else if (url === lUrl) { const serverWebsite = serverWebsites[url]; const localWebsite = 
localWebsites[url]; // compare cards const serverCards = []; const localCards = []; if (!isEmpty(serverWebsite.cards)) { for (const sCard of serverWebsite.cards) { serverCards.push(sCard.card_id); } } if (!isEmpty(localWebsite.cards)) { for (const lCard of localWebsite.cards) { localCards.push(lCard.card_id); // if same exists, compare edited if (serverCards.includes(lCard.card_id)) { for (const sCard of serverWebsite.cards) { if (sCard.card_id === lCard.card_id) { if (sCard.edited > lCard.edited) { serverNewer[url].cards.push(sCard); } else if (sCard.edited < lCard.edited) { localNewer[url].cards.push(sCard); } } } } else { // if doesn't exist, add directly localNewer[url].cards.push(lCard); } } } if (serverCards.length > 0) { for (const sCard of serverWebsite.cards) { if (!localCards.includes(sCard.card_id)) { serverNewer[lUrl].cards.push(sCard); } } } // compare highlights const lHighlights = localWebsite.highlights; const sHighlights = serverWebsite.highlights; if (isEmpty(sHighlights) && !isEmpty(lHighlights)) localNewer[url].highlights = lHighlights; else if (isEmpty(lHighlights) && !isEmpty(sHighlights)) serverNewer[url].highlights = sHighlights; else if (!isEmpty(lHighlights) && !isEmpty(sHighlights)) { for (const highlight in sHighlights) { if (!Object.keys(lHighlights).includes(highlight)) serverNewer[url].highlights[highlight] = sHighlights[highlight]; for (const lHighlight in lHighlights) { if (!Object.keys(sHighlights).includes(lHighlight)) localNewer[url].highlights[lHighlight] = lHighlights[lHighlight]; else if (lHighlight === highlight) { if (lHighlights[highlight].edited > sHighlights[highlight].edited) localNewer[url].highlights[highlight] = lHighlights[highlight]; else if (lHighlights[highlight].edited < sHighlights[highlight].edited) serverNewer[url].highlights[highlight] = sHighlights[highlight]; } } } } // compare deleted if (isEmpty(serverWebsite.deleted) && !isEmpty(localWebsite.deleted)) localNewer[url].deleted = localWebsite.deleted; else if (isEmpty(localWebsite.deleted) && !isEmpty(serverWebsite.deleted)) serverNewer[url].deleted = serverWebsite.deleted; else if (!isEmpty(serverWebsite.deleted) && !isEmpty(localWebsite.deleted)) { const mergedDeletedRaw = serverWebsite.deleted.concat( localWebsite.deleted.filter(entry => !serverWebsite.deleted.includes(entry)) ); const mergedDeleted = []; for (const entry of mergedDeletedRaw) { if (!mergedDeleted.includes(entry)) mergedDeleted.push(entry); } // console.log( // 'mergedDeleted, serverWebsite.deleted, localWebsite.deleted', // mergedDeleted, // serverWebsite.deleted, // localWebsite.deleted, // timestamp() // ); if (!isEqual(mergedDeleted, serverWebsite.deleted)) localNewer[url].deleted = localWebsite.deleted; if (!isEqual(mergedDeleted, localWebsite.deleted)) { serverNewer[url].deleted = serverWebsite.deleted; // this insures localWebsites is up to date for the next part localWebsites[url].deleted = mergedDeleted; } } } } } } } } function purgeEmptyAndDeleted(localNewer, serverNewer) { const purgedLocalNewer = {}; // console.log(' localNewer, serverNewer', localNewer, serverNewer, timestamp()); // filter out deleted cards/highlights, others cards/highlights and empty sections here for (const url in localNewer) { let deleted = []; // based on previous steps, this should already be the up to date merged deleted list if (localWebsites[url]) { if (localWebsites[url].deleted) deleted = localWebsites[url].deleted; } let hasCards = false; let hasHighlights = false; let hasDeleted = false; if 
(!isEmpty(localNewer[url].cards)) hasCards = true; if (!isEmpty(localNewer[url].highlights)) hasHighlights = true; if (!isEmpty(localNewer[url].deleted)) hasDeleted = true; if (hasCards || hasHighlights || hasDeleted) { purgedLocalNewer[url] = {}; if (hasCards) for (const card of localNewer[url].cards) if (!deleted.includes(card.card_id) && card.user_id === userId) { if (!purgedLocalNewer[url].cards) purgedLocalNewer[url].cards = []; purgedLocalNewer[url].cards.push(card); } if (hasHighlights) for (const highlight in localNewer[url].highlights) if ( !deleted.includes(highlight) && localNewer[url].highlights[highlight].user_id === userId ) { if (!purgedLocalNewer[url].highlights) purgedLocalNewer[url].highlights = {}; purgedLocalNewer[url].highlights[highlight] = localNewer[url].highlights[highlight]; } if (hasDeleted) purgedLocalNewer[url].deleted = localNewer[url].deleted; } } const purgedServerNewer = {}; for (const url in serverNewer) { let deleted = []; if (localWebsites[url]) { if (localWebsites[url].deleted) deleted = localWebsites[url].deleted; } let hasCards = false; let hasHighlights = false; let hasDeleted = false; if (!isEmpty(serverNewer[url].cards)) hasCards = true; if (!isEmpty(serverNewer[url].highlights)) hasHighlights = true; if (!isEmpty(serverNewer[url].deleted)) hasDeleted = true; if (hasCards || hasHighlights || hasDeleted) { purgedServerNewer[url] = {}; if (hasCards) for (const card of serverNewer[url].cards) if (!deleted.includes(card.card_id) && card.user_id === userId) { if (!purgedServerNewer[url].cards) purgedServerNewer[url].cards = []; purgedServerNewer[url].cards.push(card); } if (hasHighlights) for (const highlight in serverNewer[url].highlights) if ( !deleted.includes(highlight) && serverNewer[url].highlights[highlight].user_id === userId ) { if (!purgedServerNewer[url].highlights) purgedServerNewer[url].highlights = {}; purgedServerNewer[url].highlights[highlight] = serverNewer[url].highlights[highlight]; } if (hasDeleted) purgedServerNewer[url].deleted = serverNewer[url].deleted; } } console.log( ' purgedLocalNewer, purgedServerNewer', purgedLocalNewer, purgedServerNewer, timestamp() ); return { localNewer: purgedLocalNewer, serverNewer: purgedServerNewer, }; } const purged = purgeEmptyAndDeleted(localNewer, serverNewer); return { localNewer: purged.localNewer, serverNewer: purged.serverNewer, localWebsites: localWebsites, }; } async function removeDeletedFromLocal(localWebsites) { // delete local cards and highlights from local // console.log(' localWebsites before deletions', localWebsites); if (!isEmpty(localWebsites)) { for (const url in localWebsites) { const website = JSON.parse(JSON.stringify(localWebsites[url])); if (website.deleted) { if (website.cards) { const purgedCards = []; for (const card of website.cards) { if (!website.deleted.includes(card.card_id)) purgedCards.push(card); } if (purgedCards.length !== website.cards.length) website.cards = purgedCards; } if (website.highlights) { const purgedHighlights = {}; for (const highlight in website.highlights) { if (!website.deleted.includes(highlight)) purgedHighlights[highlight] = website.highlights[highlight]; } if (Object.keys(website.highlights).length !== Object.keys(purgedHighlights).length) website.highlights = purgedHighlights; } localWebsites[url] = website; } } } // console.log(' localWebsites after deletions', localWebsites); return localWebsites; } async function getNewerFromServer(jwt, serverUrl, serverNewer) { // get highlights/cards let getWebsitesSelectedContentResults = null; if 
(!isEmpty(serverNewer)) { const getWebsitesSelectedContentCall = { url: serverUrl + '/get_websites_selected_content', jwt: jwt, method: 'POST', data: serverNewer, }; await callAPI(getWebsitesSelectedContentCall).then(data => { getWebsitesSelectedContentResults = data; }); console.log( ' get Websites Selected Content Results ', getWebsitesSelectedContentResults, timestamp() ); if (!getWebsitesSelectedContentResults) { throw new Error('error in get_websites_selected_content'); } return getWebsitesSelectedContentResults; } } async function saveNewerToLocal( getWebsitesSelectedContentResults, localWebsites, localUserCollection, initialWebsites ) { // add new items to local if (!isEmpty(getWebsitesSelectedContentResults)) { if (!isEmpty(getWebsitesSelectedContentResults.websites)) { const newWebsites = getWebsitesSelectedContentResults.websites; if (isEmpty(localWebsites)) localWebsites = newWebsites; else { for (const url in newWebsites) { const nWebsite = newWebsites[url]; if (!Object.keys(localWebsites).includes(url)) localWebsites[url] = nWebsite; else { for (const lUrl in localWebsites) { if (url === lUrl) { const lWebsite = localWebsites[url]; if (!isEmpty(nWebsite.cards)) { if (!lWebsite.cards) lWebsite.cards = []; for (const card of nWebsite.cards) lWebsite.cards.push(card); } if (!isEmpty(nWebsite.highlights)) { if (!lWebsite.highlights) lWebsite.highlights = {}; for (const highlight in nWebsite.highlights) { lWebsite.highlights[highlight] = nWebsite.highlights[highlight]; } } if (!isEmpty(nWebsite.deleted)) { if (!lWebsite.deleted) lWebsite.deleted = []; for (const entry of nWebsite.deleted) if (!lWebsite.deleted.includes(entry)) lWebsite.deleted.push(entry); } // unnecesary? not sure if getting changed without this and the JSON.parse localWebsites[url] = lWebsite; } } } } } } } function getStorageWebsites() { return new Promise((resolve, reject) => { chrome.storage.local.get(['websites'], items => { let websites; isEmpty(items.websites) ? (websites = {}) : (websites = items.websites); if (websites !== undefined) { resolve(websites); } else { reject(new Error('Unable to retrieve local storage')); } }); }); } function setStorage(items) { return new Promise(resolve => { console.log(' saving to local, localWebsites', localWebsites, timestamp()); chrome.storage.local.set(items, () => { resolve(true); }); }); } const currentWebsites = await getStorageWebsites(); // bug =deleted is not in local websites..... 
// save to local // if intitial doesn't equal current, means websites was changed during sync if (!isEqual(initialWebsites, currentWebsites)) { // for (const iWebsite in initialWebsites) { // if (!Object.keys(currentWebsites).includes(iWebsite)) console.log(iWebsite); // for (const cWebsite in currentWebsites) { // if (!Object.keys(initialWebsites).includes(cWebsite)) console.log(cWebsite); // if (iWebsite === cWebsite) { // if (!isEqual(initialWebsites[iWebsite], currentWebsites[cWebsite])) { // for (const iItem in initialWebsites[iWebsite]) { // if (!Object.keys(cWebsite).includes(iItem)) // console.log('cWebsite not included', initialWebsites[iWebsite][iItem]); // for (const cItem in currentWebsites[cWebsite]) { // if (!Object.keys(iWebsite).includes(cItem)) // console.log('iWebsite not included', currentWebsites[cWebsite][cItem]); // if (iItem === cItem) { // if (!isEqual(iItem, cItem)) // console.log( // 'difference here', // currentWebsites[cWebsite][cItem], // initialWebsites[iWebsite][iItem] // ); // } // } // } // } // } // } // } return false; } else { if (isEmpty(localWebsites)) localWebsites = {}; const items = { websites: localWebsites, lastSyncsWebsites: localWebsites, lastSyncsUserCollection: localUserCollection, }; return setStorage(items); } } async function uploadNewerToServer(jwt, serverUrl, localNewer) { // post to server if (!isEmpty(localNewer)) { console.log(' posting websites, localNewer', localNewer, timestamp()); const postWebsitesCall = { url: serverUrl + '/post_websites', jwt: jwt, method: 'POST', data: { websites: localNewer, }, }; let postWebsitesResult = null; await callAPI(postWebsitesCall).then(data => { postWebsitesResult = data; }); console.log(' postWebsites Result', postWebsitesResult, timestamp()); if (!postWebsitesResult) { throw new Error('error posting websites'); } } } export { cloudSync, syncStatus };
groupproperties.py
__author__ = 'Pavel Ageyev' class Groups: def __init__(self, name , header, footer): self.name=name self.header=header self.footer=footer class Formfields: def __init__(self, firstName, lastName, companyName, email, mobile): self.firstName=firstName self.lastName=lastName self.companyName=companyName self.email=email
self.mobile=mobile
prefixes.rs
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! From the NLL RFC: "The deep [aka 'supporting'] prefixes for an //! place are formed by stripping away fields and derefs, except that //! we stop when we reach the deref of a shared reference. [...] " //! //! "Shallow prefixes are found by stripping away fields, but stop at //! any dereference. So: writing a path like `a` is illegal if `a.b` //! is borrowed. But: writing `a` is legal if `*a` is borrowed, //! whether or not `a` is a shared or mutable reference. [...] " use super::MirBorrowckCtxt; use rustc::hir; use rustc::ty::{self, TyCtxt}; use rustc::mir::{Mir, Place, ProjectionElem}; pub trait IsPrefixOf<'tcx> { fn is_prefix_of(&self, other: &Place<'tcx>) -> bool; } impl<'tcx> IsPrefixOf<'tcx> for Place<'tcx> { fn is_prefix_of(&self, other: &Place<'tcx>) -> bool { let mut cursor = other; loop { if self == cursor { return true; } match *cursor { Place::Promoted(_) | Place::Local(_) | Place::Static(_) => return false, Place::Projection(ref proj) => { cursor = &proj.base; } } } } } pub(super) struct Prefixes<'cx, 'gcx: 'tcx, 'tcx: 'cx> { mir: &'cx Mir<'tcx>, tcx: TyCtxt<'cx, 'gcx, 'tcx>, kind: PrefixSet, next: Option<&'cx Place<'tcx>>, } #[derive(Copy, Clone, PartialEq, Eq, Debug)] #[allow(dead_code)] pub(super) enum PrefixSet { /// Doesn't stop until it returns the base case (a Local or /// Static prefix). All, /// Stops at any dereference. Shallow, /// Stops at the deref of a shared reference. Supporting, } impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { /// Returns an iterator over the prefixes of `place` /// (inclusive) from longest to smallest, potentially /// terminating the iteration early based on `kind`. pub(super) fn prefixes( &self, place: &'cx Place<'tcx>, kind: PrefixSet, ) -> Prefixes<'cx, 'gcx, 'tcx> { Prefixes { next: Some(place), kind, mir: self.mir, tcx: self.infcx.tcx, } } } impl<'cx, 'gcx, 'tcx> Iterator for Prefixes<'cx, 'gcx, 'tcx> { type Item = &'cx Place<'tcx>; fn
(&mut self) -> Option<Self::Item> { let mut cursor = match self.next { None => return None, Some(place) => place, }; // Post-processing `place`: Enqueue any remaining // work. Also, `place` may not be a prefix itself, but // may hold one further down (e.g. we never return // downcasts here, but may return a base of a downcast). 'cursor: loop { let proj = match *cursor { Place::Promoted(_) | Place::Local(_) | // search yielded this leaf Place::Static(_) => { self.next = None; return Some(cursor); } Place::Projection(ref proj) => proj, }; match proj.elem { ProjectionElem::Field(_ /*field*/, _ /*ty*/) => { // FIXME: add union handling self.next = Some(&proj.base); return Some(cursor); } ProjectionElem::Downcast(..) | ProjectionElem::Subslice { .. } | ProjectionElem::ConstantIndex { .. } | ProjectionElem::Index(_) => { cursor = &proj.base; continue 'cursor; } ProjectionElem::Deref => { // (handled below) } } assert_eq!(proj.elem, ProjectionElem::Deref); match self.kind { PrefixSet::Shallow => { // shallow prefixes are found by stripping away // fields, but stop at *any* dereference. // So we can just stop the traversal now. self.next = None; return Some(cursor); } PrefixSet::All => { // all prefixes: just blindly enqueue the base // of the projection self.next = Some(&proj.base); return Some(cursor); } PrefixSet::Supporting => { // fall through! } } assert_eq!(self.kind, PrefixSet::Supporting); // supporting prefixes: strip away fields and // derefs, except we stop at the deref of a shared // reference. let ty = proj.base.ty(self.mir, self.tcx).to_ty(self.tcx); match ty.sty { ty::RawPtr(_) | ty::Ref( _, /*rgn*/ _, /*ty*/ hir::MutImmutable ) => { // don't continue traversing over derefs of raw pointers or shared borrows. self.next = None; return Some(cursor); } ty::Ref( _, /*rgn*/ _, /*ty*/ hir::MutMutable, ) => { self.next = Some(&proj.base); return Some(cursor); } ty::Adt(..) if ty.is_box() => { self.next = Some(&proj.base); return Some(cursor); } _ => panic!("unknown type fed to Projection Deref."), } } } }
next
index.js
import Vue from "vue"; import Router from "vue-router"; Vue.use(Router); /* Layout */ import Layout from "@/layout"; /** * constantRoutes * a base page that does not have permission requirements * all roles can be accessed */ export const constantRoutes = [ { path: "/login", component: () => import("@/views/login/index"), hidden: true, }, { path: "/auth-redirect", component: () => import("@/views/login/auth-redirect"), hidden: true, }, { path: "/404", component: () => import("@/views/error-page/404"), hidden: true, }, { path: "/401", component: () => import("@/views/error-page/401"), hidden: true, }, { path: "/", component: Layout, redirect: "/dashboard", children: [ { path: "dashboard", component: () => import("@/views/dashboard/index"), name: "Dashboard", meta: { title: "Dashboard", icon: "dashboard", affix: true }, }, ], }, ]; /** * asyncRoutes * the routes that need to be dynamically loaded based on user roles */ export const asyncRoutes = [ { path: "/error", component: Layout, redirect: "noRedirect", name: "ErrorPages", meta: { title: "Error Pages", icon: "404", }, children: [ { path: "401", component: () => import("@/views/error-page/401"), name: "Page401", meta: { title: "401", noCache: true }, }, { path: "404", component: () => import("@/views/error-page/404"), name: "Page404", meta: { title: "404", noCache: true }, }, ], }, { path: "/error-log", component: Layout, children: [ { path: "log", component: () => import("@/views/error-log/index"), name: "ErrorLog", meta: { title: "Error Log", icon: "bug" }, }, ], }, // 404 page must be placed at the end !!! { path: "*", redirect: "/404", hidden: true }, ]; const createRouter = () => new Router({ // mode: 'history', // require service support scrollBehavior: () => ({ y: 0 }), routes: constantRoutes, }); const router = createRouter(); // Detail see: https://github.com/vuejs/vue-router/issues/1234#issuecomment-357941465 export function
() { const newRouter = createRouter(); router.matcher = newRouter.matcher; // reset router } export default router;
resetRouter
setup.py
import io import os import sys from shutil import rmtree from setuptools import find_packages, setup, Command # Package meta-data. NAME = 'replicable' DESCRIPTION = 'Reproducible storage of gridded and stochastically generated simulated datasets' URL = 'https://github.com/philastrophist/replicable' EMAIL = '[email protected]' AUTHOR = 'philastrophist' # What packages are required for this module to be executed? with open('requirements.txt', 'r') as f: REQUIRED = f.readlines() # The rest you shouldn't have to touch too much :) # ------------------------------------------------ # Except, perhaps the License and Trove Classifiers! # If you do change the License, remember to change the Trove Classifier for that! here = os.path.abspath(os.path.dirname(__file__)) # Import the README and use it as the long-description. # Note: this will only work if 'README.rst' is present in your MANIFEST.in file! with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = '\n' + f.read() # Load the package's __version__.py module as a dictionary. about = {} with open(os.path.join(here, NAME, '__version__.py')) as f: exec(f.read(), about) class UploadCommand(Command): """Support setup.py upload.""" description = 'Build and publish the package.' user_options = [] @staticmethod def status(s): """Prints things in bold.""" print('\033[1m{0}\033[0m'.format(s)) def initialize_options(self): pass def finalize_options(self): pass def run(self): try: self.status('Removing previous builds...') rmtree(os.path.join(here, 'dist')) except OSError: pass self.status('Building Source and Wheel (universal) distribution...') os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable)) self.status('Uploading the package to PyPi via Twine...') os.system('twine upload dist/*') sys.exit() # Where the magic happens: setup( name=NAME, version=about['__version__'], description=DESCRIPTION, long_description=long_description, author=AUTHOR, author_email=EMAIL, url=URL, packages=find_packages(exclude=('tests',)), #If your package is a single module, use this instead of 'packages': # py_modules=['mypackage'], entry_points={}, install_requires=REQUIRED, include_package_data=True, license='MIT', classifiers=[ # Trove classifiers # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers 'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy' ], # $ setup.py publish support. cmdclass={ 'upload': UploadCommand, }, )
'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3',
hvac.rs
use hvac::prelude::*; #[test] fn new_hvac_is_idle() { let mut hvac = Hvac::default(); let state = hvac.tick(0);
} #[test] fn new_hvac_enforces_min_heat_recover_constraints() { let mut hvac = Hvac::default().with_heat(None, Some(100)); let state = hvac.heat(); assert_eq!(state.service, None); assert_eq!(state.fan, false); for i in 0..100 { let state = hvac.tick(i); assert_eq!(state.service, None); assert_eq!(state.fan, false); } let state = hvac.tick(100); assert_eq!(state.service, Some(HvacService::Heat)); assert_eq!(state.fan, true); } #[test] fn new_hvac_enforces_min_cool_recover_constraints() { let mut hvac = Hvac::default().with_cool(None, Some(100)); let state = hvac.cool(); assert_eq!(state.service, None); assert_eq!(state.fan, false); for i in 0..100 { let state = hvac.tick(i); assert_eq!(state.service, None); assert_eq!(state.fan, false); } let state = hvac.tick(100); assert_eq!(state.service, Some(HvacService::Cool)); assert_eq!(state.fan, true); } #[test] fn new_hvac_enforces_min_fan_recover_constraints() { let mut hvac = Hvac::default().with_fan(None, Some(100)); let state = hvac.fan_auto(false); assert_eq!(state.service, None); assert_eq!(state.fan, false); for i in 0..100 { let state = hvac.tick(i); assert_eq!(state.service, None); assert_eq!(state.fan, false); } let state = hvac.tick(100); assert_eq!(state.service, None); assert_eq!(state.fan, true); } #[test] fn hvac_fan_auto_with_heat() { let mut hvac = Hvac::default().with_heat(None, None).with_fan(None, None); let state = hvac.heat(); assert_eq!(state.service, Some(HvacService::Heat)); assert_eq!(state.fan, true); let state = hvac.idle(); assert_eq!(state.service, None); assert_eq!(state.fan, false); } #[test] fn hvac_fan_auto_with_cool() { let mut hvac = Hvac::default().with_cool(None, None).with_fan(None, None); let state = hvac.cool(); assert_eq!(state.service, Some(HvacService::Cool)); assert_eq!(state.fan, true); let state = hvac.idle(); assert_eq!(state.service, None); assert_eq!(state.fan, false); } #[test] fn hvac_fan_auto_sequence() { let mut hvac = Hvac::default() .with_heat(None, None) .with_cool(None, None) .with_fan(None, None); let state = hvac.idle(); assert_eq!(state.fan, false); let state = hvac.heat(); assert_eq!(state.fan, true); let state = hvac.cool(); assert_eq!(state.fan, true); let state = hvac.idle(); assert_eq!(state.fan, false); let state = hvac.heat(); assert_eq!(state.fan, true); let state = hvac.idle(); assert_eq!(state.fan, false); let state = hvac.cool(); assert_eq!(state.fan, true); let state = hvac.idle(); assert_eq!(state.fan, false); } #[test] fn hvac_fan_manual() { let mut hvac = Hvac::default() .with_heat(None, None) .with_cool(None, None) .with_fan(None, None); let state = hvac.fan_auto(false); assert_eq!(state.service, None); assert_eq!(state.fan, true); let state = hvac.heat(); assert_eq!(state.service, Some(HvacService::Heat)); assert_eq!(state.fan, true); let state = hvac.cool(); assert_eq!(state.service, Some(HvacService::Cool)); assert_eq!(state.fan, true); let state = hvac.idle(); assert_eq!(state.service, None); assert_eq!(state.fan, true); let state = hvac.fan_auto(true); assert_eq!(state.service, None); assert_eq!(state.fan, false); } #[test] fn fan_auto_min_run_carries_past_heat() { let mut hvac = Hvac::default() .with_heat(None, None) .with_fan(Some(1), None); let state = hvac.tick(0); assert_eq!(state.fan, false); let state = hvac.heat(); assert_eq!(state.fan, true); let state = hvac.idle(); assert_eq!(state.fan, true); let state = hvac.tick(1); assert_eq!(state.fan, false); }
assert_eq!(state.service, None); assert_eq!(state.fan, false);
example_are_zones_ipv6_simple.py
#!/usr/bin/env python """Cloudflare API code - example"""
import os import sys sys.path.insert(0, os.path.abspath('..')) import CloudFlare def main(): """Cloudflare API code - example""" cf = CloudFlare.CloudFlare() zones = cf.zones.get(params={'per_page':50}) for zone in zones: zone_name = zone['name'] zone_id = zone['id'] settings_ipv6 = cf.zones.settings.ipv6.get(zone_id) ipv6_on = settings_ipv6['value'] print(zone_id, ipv6_on, zone_name) exit(0) if __name__ == '__main__': main()
from __future__ import print_function
users.py
from fastapi import Depends, HTTPException from fastapi import status, APIRouter from jose import JWTError, jwt from sqlalchemy.orm import Session from app.core.dependecies import get_db, SECRET_KEY, ALGORITHM, TokenPurpose, get_current_active_user, get_current_user, \ verify_password from app.core.internal import schemas, crud from app.game_engine.models import * router = APIRouter( prefix="/users", tags=["users"], responses={404: {"error": "Not found"}, 422: {"error": "Invalid input data"}}, ) # TODO: test @router.post("/verify/{token}") def verify_user(token: str, db: Session = Depends(get_db)): try: payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM]) email: str = payload.get("sub") purpose = payload.get("purpose") is_verifed = payload.get("hash") if email is None or purpose != TokenPurpose.ACCOUNT_VERIFICATION:
token_data = schemas.VerificationTokenData(email=email, purpose=purpose, hash=is_verifed) except JWTError: raise HTTPException(status_code=400, detail="The verification link is invalid or has expired.") user = crud.get_user_by_email(db, token_data.email) if user is None: raise HTTPException(status_code=400, detail="The verification link is invalid or has expired.") if user.is_verified: raise HTTPException(status_code=200, detail='Account already confirmed. Please login.') else: crud.verify_user(user=user, db=db) return {"detail": "Account verified successfully"} # TODO: test @router.post("/change_password") def change_password(change_password_schema: schemas.EmergencyChangePasswordSchema, db: Session = Depends(get_db)): try: token = change_password_schema.token payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM]) username: str = payload.get("sub") purpose = payload.get("purpose") hash = payload.get("hash") if username is None or purpose != TokenPurpose.CHANGE_PASSWORD: raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials") token_data = schemas.TokenData(username=username, purpose=purpose, hash=hash) except JWTError: raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials") user = crud.get_user(db, token_data.username) if user is None: raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials") if user.hashed_password != hash: raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials") return crud.change_password(user, change_password_schema.newPassword, db) @router.post("", response_model=schemas.User) def create_user(user: schemas.UserCreate, db: Session = Depends(get_db)): db_user = crud.get_user_by_email(db, email=user.email) if db_user: raise HTTPException(status_code=400, detail="Email already registered") db_user_1 = crud.get_user(db, username=user.username) if db_user_1: raise HTTPException(status_code=400, detail="This name is taken") return crud.create_user(db=db, user=user) @router.get("", response_model=List[schemas.UserGet]) def read_users(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)): users = crud.get_users(db, skip=skip, limit=limit) return users @router.get("/{username}", response_model=schemas.UserGet) def read_user(username: str, db: Session = Depends(get_db)): db_user = crud.get_user(db, username=username) if db_user is None: raise HTTPException(status_code=404, detail="User not found") return db_user @router.get("/me/blocked", response_model=List[str]) async def get_users_blocked(current_user: schemas.User = Depends(get_current_active_user), db: Session = Depends(get_db)): return crud.get_blocked_users(user=current_user, db=db) @router.post("/me/block", response_model=schemas.BlockedUsers) async def block_user(usernameSchema: schemas.Username, current_user: schemas.User = Depends(get_current_active_user), db: Session = Depends(get_db)): username = usernameSchema.username user_to_block = crud.get_user(username=username, db=db) if not user_to_block: raise HTTPException(status_code=404, detail="User not found") blocked = crud.get_blocked_users(current_user, db) if user_to_block.username == current_user.username: raise HTTPException(status_code=403, detail="Cannot block yourself") if username in blocked: raise HTTPException(status_code=403, detail="User already blocked") return crud.create_block_record(user=current_user, user_to_block=user_to_block, db=db) # TODO: test 
@router.delete("/me/unblock") async def unblock_user(usernameSchema: schemas.Username, current_user: schemas.User = Depends(get_current_active_user), db: Session = Depends(get_db)): username = usernameSchema.username user_to_unblock = crud.get_user(username=username, db=db) blocked = crud.get_blocked_users(user=current_user, db=db) if not user_to_unblock: raise HTTPException(status_code=404, detail="User not found") if user_to_unblock.username not in blocked: raise HTTPException(status_code=403, detail="User not blocked") return crud.remove_block_record(user=current_user, blocked_user=user_to_unblock, db=db) @router.get("/me/info", response_model=schemas.User) async def read_users_me(current_user: schemas.User = Depends(get_current_active_user)): return current_user @router.post("/me/change_password") def change_password(change_password_schema: schemas.ChangePasswordSchema, current_user: schemas.User = Depends(get_current_user), db: Session = Depends(get_db)): db_user = crud.get_user(db=db, username=current_user.username) if not verify_password(change_password_schema.oldPassword, db_user.hashed_password): raise HTTPException(status_code=401, detail="Invalid old password") return crud.change_password(user=current_user, new_password=change_password_schema.newPassword, db=db) @router.get("/{username}/history", response_model=List[schemas.GameHistoryEntry]) def get_users_game_history(username: str, db: Session = Depends(get_db)): db_user = crud.get_user(db, username=username) if db_user is None: raise HTTPException(status_code=404, detail="User not found") history = crud.get_last_20_matches(db=db, user=db_user) return history @router.get("/{username}/stats", response_model=schemas.Stats) def get_stats(username: str, db: Session = Depends(get_db)): db_user = crud.get_user(db, username=username) if db_user is None: raise HTTPException(status_code=404, detail="User not found") return crud.get_stats(db=db, user=db_user) @router.get("/me/settings", response_model=schemas.Settings) def get_settings(current_user: schemas.User = Depends(get_current_active_user), db: Session = Depends(get_db)): return crud.get_settings(db=db, user=current_user) @router.patch("/me/settings", response_model=schemas.Settings) def update_settings(settings: schemas.Settings, current_user: schemas.User = Depends(get_current_active_user), db: Session = Depends(get_db)): return crud.update_settings(settings=settings, db=db, user=current_user) @router.get("/ranking/top", response_model=List[schemas.UserGet]) def get_top_ranked(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)): users = crud.get_users_by_rating(db, skip=skip, limit=limit) return users
raise HTTPException(status_code=400, detail="The verification link is invalid or has expired.")
huffman.rs
use std::prelude::v1::*; use numpy::PyReadonlyArray1; use pyo3::prelude::*; use crate::symbol::huffman::{self, NanError}; pub fn init_module(_py: Python<'_>, module: &PyModule) -> PyResult<()> { module.add_class::<EncoderHuffmanTree>()?; module.add_class::<DecoderHuffmanTree>()?; Ok(()) } /// A Huffman tree that can be used for encoding data. /// /// Expects a single argument `probabilities`, which is a rank-1 numpy array with /// `dtype=np.float64` that specifies the probabilities of each one of the symbols in the /// range `{0, 1, ..., len(probabilities)-1}`. All probabilities must be nonnegative and /// finite, but probabilities do not need to add up to one since only the ratios of /// probabilities will affect the shape of the constructed Huffman tree (note, however, that /// rescaling probabilities can, in edge cases, affect the shape of the Huffman tree due /// to rounding errors, so be consistent with how you scale probabilities). /// /// # Examples /// /// See [examples](../symbol.html#examples) in parent module. #[pyclass] #[pyo3(text_signature = "(probabilities)")] #[derive(Debug)] pub struct EncoderHuffmanTree { pub(crate) inner: huffman::EncoderHuffmanTree, } #[pymethods] impl EncoderHuffmanTree { #[new] pub fn new(probabilities: PyReadonlyArray1<'_, f64>) -> PyResult<Self> { let inner = huffman::EncoderHuffmanTree::from_float_probabilities::<f64, _>( probabilities.iter().unwrap(), )?; Ok(Self { inner }) } } /// A Huffman tree that can be used for decoding data. /// /// Expects a single argument `probabilities`, which is a rank-1 numpy array with /// `dtype=np.float64` that specifies the probabilities of each one of the symbols in the /// range `{0, 1, ..., len(probabilities)-1}`. All probabilities must be nonnegative and /// finite, but probabilities do not need to add up to one since only the ratios of /// probabilities will affect the shape of the constructed Huffman tree (note, however, that /// rescaling probabilities can, in edge cases, affect the shape of the Huffman tree due /// to rounding errors, so be consistent with how you scale probabilities). /// /// # Examples /// /// See [examples](../symbol.html#examples) in parent module. #[pyclass] #[pyo3(text_signature = "(probabilities)")] #[derive(Debug)] pub struct DecoderHuffmanTree { pub(crate) inner: huffman::DecoderHuffmanTree, } #[pymethods] impl DecoderHuffmanTree { #[new] pub fn new(probabilities: PyReadonlyArray1<'_, f64>) -> PyResult<Self> { let inner = huffman::DecoderHuffmanTree::from_float_probabilities::<f64, _>( probabilities.iter().unwrap(), )?; Ok(Self { inner }) } } impl From<NanError> for PyErr { fn
(err: NanError) -> Self { match err { NanError::NaN => pyo3::exceptions::PyValueError::new_err("NaN probability provided."), } } }
from
main.rs
use crate::main_loop::relayer_main_loop; use crate::main_loop::LOOP_SPEED; use clarity::Address as EthAddress; use clarity::PrivateKey as EthPrivateKey; use docopt::Docopt; use env_logger::Env; use gravity_utils::connection_prep::{ check_for_eth, create_rpc_connections, wait_for_cosmos_node_ready, }; pub mod batch_relaying; pub mod find_latest_valset; pub mod logic_call_relaying; pub mod main_loop; pub mod valset_relaying; #[macro_use] extern crate serde_derive; #[macro_use] extern crate lazy_static; #[macro_use] extern crate log; #[derive(Debug, Deserialize)] struct
{ flag_ethereum_key: String, flag_cosmos_grpc: String, flag_address_prefix: String, flag_ethereum_rpc: String, flag_contract_address: String, } lazy_static! { pub static ref USAGE: String = format!( "Usage: {} --ethereum-key=<key> --cosmos-grpc=<url> --address-prefix=<prefix> --ethereum-rpc=<url> --contract-address=<addr> Options: -h --help Show this screen. --ethereum-key=<ekey> An Ethereum private key containing non-trivial funds --cosmos-grpc=<gurl> The Cosmos gRPC url --address-prefix=<prefix> The prefix for addresses on this Cosmos chain --ethereum-rpc=<eurl> The Ethereum RPC url, Geth light clients work and sync fast --contract-address=<addr> The Ethereum contract address for Gravity About: The Gravity relayer component for Althea-Gravity, responsible for relaying data from the Cosmos blockchain to the Ethereum blockchain. The cosmos key and fees are optional since they are only used to request the creation of batches or validator sets to relay. Written By: {} Version {}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_AUTHORS"), env!("CARGO_PKG_VERSION"), ); } #[actix_rt::main] async fn main() { env_logger::Builder::from_env(Env::default().default_filter_or("info")).init(); // On Linux static builds we need to probe ssl certs path to be able to // do TLS stuff. openssl_probe::init_ssl_cert_env_vars(); let args: Args = Docopt::new(USAGE.as_str()) .and_then(|d| d.deserialize()) .unwrap_or_else(|e| e.exit()); let ethereum_key: EthPrivateKey = args .flag_ethereum_key .parse() .expect("Invalid Ethereum private key!"); let gravity_contract_address: EthAddress = args .flag_contract_address .parse() .expect("Invalid contract address!"); let connections = create_rpc_connections( args.flag_address_prefix, Some(args.flag_cosmos_grpc), Some(args.flag_ethereum_rpc), LOOP_SPEED, ) .await; let public_eth_key = ethereum_key .to_public_key() .expect("Invalid Ethereum Private Key!"); info!("Starting Gravity Relayer"); info!("Ethereum Address: {}", public_eth_key); let contact = connections.contact.clone().unwrap(); let web3 = connections.web3.clone().unwrap(); // check if the cosmos node is syncing, if so wait for it // we can't move any steps above this because they may fail on an incorrect // historic chain state while syncing occurs wait_for_cosmos_node_ready(&contact).await; check_for_eth(public_eth_key, &web3).await; relayer_main_loop( ethereum_key, connections.web3.unwrap(), connections.grpc.unwrap(), gravity_contract_address, 1f32, ) .await }
Args
aesr.rs
#[doc = "Reader of register AESR"] pub type R = crate::R<u8, super::AESR>; #[doc = "ADMA Error State\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ERRST_A { #[doc = "0: ST_STOP (Stop DMA)"] STOP, #[doc = "1: ST_FDS (Fetch Descriptor)"] FDS, #[doc = "3: ST_TFR (Transfer Data)"] TFR, } impl From<ERRST_A> for u8 { #[inline(always)] fn from(variant: ERRST_A) -> Self { match variant { ERRST_A::STOP => 0, ERRST_A::FDS => 1, ERRST_A::TFR => 3, } } } #[doc = "Reader of field `ERRST`"] pub type ERRST_R = crate::R<u8, ERRST_A>; impl ERRST_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> crate::Variant<u8, ERRST_A> { use crate::Variant::*; match self.bits { 0 => Val(ERRST_A::STOP), 1 => Val(ERRST_A::FDS), 3 => Val(ERRST_A::TFR), i => Res(i), } } #[doc = "Checks if the value of the field is `STOP`"] #[inline(always)] pub fn is_stop(&self) -> bool { *self == ERRST_A::STOP } #[doc = "Checks if the value of the field is `FDS`"] #[inline(always)] pub fn is_fds(&self) -> bool
#[doc = "Checks if the value of the field is `TFR`"] #[inline(always)] pub fn is_tfr(&self) -> bool { *self == ERRST_A::TFR } } #[doc = "ADMA Length Mismatch Error\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum LMIS_A { #[doc = "0: No Error"] NO, #[doc = "1: Error"] YES, } impl From<LMIS_A> for bool { #[inline(always)] fn from(variant: LMIS_A) -> Self { match variant { LMIS_A::NO => false, LMIS_A::YES => true, } } } #[doc = "Reader of field `LMIS`"] pub type LMIS_R = crate::R<bool, LMIS_A>; impl LMIS_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> LMIS_A { match self.bits { false => LMIS_A::NO, true => LMIS_A::YES, } } #[doc = "Checks if the value of the field is `NO`"] #[inline(always)] pub fn is_no(&self) -> bool { *self == LMIS_A::NO } #[doc = "Checks if the value of the field is `YES`"] #[inline(always)] pub fn is_yes(&self) -> bool { *self == LMIS_A::YES } } impl R { #[doc = "Bits 0:1 - ADMA Error State"] #[inline(always)] pub fn errst(&self) -> ERRST_R { ERRST_R::new((self.bits & 0x03) as u8) } #[doc = "Bit 2 - ADMA Length Mismatch Error"] #[inline(always)] pub fn lmis(&self) -> LMIS_R { LMIS_R::new(((self.bits >> 2) & 0x01) != 0) } }
{ *self == ERRST_A::FDS }
01-11-12.js
module.exports = {"acrossmap":null,"admin":false,"answers":{"across":["APSE","ORION","DABS","NAPA","CAMPY","ORAL","SWATCHTEAM","NORI","MAO","ARP","TMAN","PATCHONTHEBACK","BEL","KIT","PESKY","USOC","GOO","SIS","GOTOTHEMATCHFOR","LUC","GNU","YURI","OPEDS","NFC","LEG","BATCHOUTOFHELL","ORCA","PTA","IVS","ITES","ALLEYCATCH","SITE","LEONE","DOUR","TICS","SYNCS","EPEE"],"down":["ANS","PAW","SPAMALOT","EATAT","OCH","RAT","IMEAN","OPART","NYMPH","DONTBESHY","AROMAS","BARACK","SLINKY","COCK","PESO","HIGHC","OTOE","EPIC","BUG","COLDCASES","OMG","STUFF","TUSH","ANNO","FULLSTOP","OREL","RIG","OBOIST","PARTII","ETCETC","CHIC","OPALS","UTLEY","TALON","EVADE","ENC","YES","CUE","HRE"]},"author":"Chuck Deodene","autowrap":null,"bbars":null,"circles":null,"clues":{"across":["1. Semidomed area","5. Constellation with the star Rigel","10. Smidgens","14. Mecca for oenophiles","15. Like a drag revue","16. ___-B","17. Fabric store employees?","19. \"Me neither\"","20. \"Nixon in China\" role","21. Sculptor Jean","22. Fed in pursuit of counterfeiters","23. Repair for a torn pullover?","27. ___ esprit (witty one)","28. Set of parts awaiting assembly","29. Bothersome","30. Org. that oversees American athletes","32. Gunk","34. Bro's sibling","35. Attend a tennis tournament because one is a fan of?","41. \"La Femme Nikita\" director Besson","42. Serengeti herd member","43. Vostok 1's Gagarin","44. Slanted columns?","47. Dallas is in it, for short","49. Kicker","50. Cookies baked by Satan?","55. Ocean predator","56. Back-to-school night grp.","57. E.M.T. hookups","58. Mineral suffixes","59. Arrest made on a side street?","64. Online destination","65. Sierra ___","66. Grumpy","67. Muscular jerks","68. Harmonizes, as digital devices","69. Form of fencing"],"down":["1. All of the above, e.g.: Abbr.","2. Claw holder","3. 2005 Broadway hit based on a 1974 film","4. Vex","5. Edinburgh exclamation","6. Turncoat","7. \"To clarify ...\"","8. Eye-straining exhibit","9. Young termite, e.g.","10. Advice to an introvert","11. Airborne stimuli","12. President after George","13. Toy consisting of 80 feet of wire","18. One making a wake-up call?","23. Money across the border","24. Feat for a soprano","25. Plains native","26. Monumental","27. Flu","31. Dead-ended investigations","33. Text messager's \"Wow!\"","34. Cram","36. Heinie","37. ___ Domini","38. Period","39. Oka River city","40. Semi","44. Wind section player","45. Trilogy's midsection","46. Yadda, yadda, yadda","48. Fashionable","51. Milky gems","52. Five-time All-Star second baseman Chase ___","53. Avian gripper","54. Sidestep","60. S.A.S.E., for one","61. \"Getting to ___\" (best-selling business book)","62. What a walk-on awaits","63. Bygone Eur. 
realm"]},"code":null,"copyright":"2012, The New York Times","date":"1\/11\/2012","dow":"Wednesday","downmap":null,"editor":"Will Shortz","grid":["A","P","S","E",".","O","R","I","O","N",".","D","A","B","S","N","A","P","A",".","C","A","M","P","Y",".","O","R","A","L","S","W","A","T","C","H","T","E","A","M",".","N","O","R","I",".",".","M","A","O",".",".","A","R","P",".","T","M","A","N",".","P","A","T","C","H","O","N","T","H","E","B","A","C","K","B","E","L",".","K","I","T",".",".",".","P","E","S","K","Y","U","S","O","C",".","G","O","O",".","S","I","S",".",".",".","G","O","T","O","T","H","E","M","A","T","C","H","F","O","R",".",".",".","L","U","C",".","G","N","U",".","Y","U","R","I","O","P","E","D","S",".",".",".","N","F","C",".","L","E","G","B","A","T","C","H","O","U","T","O","F","H","E","L","L",".","O","R","C","A",".","P","T","A",".",".","I","V","S",".",".","I","T","E","S",".","A","L","L","E","Y","C","A","T","C","H","S","I","T","E",".","L","E","O","N","E",".","D","O","U","R","T","I","C","S",".","S","Y","N","C","S",".","E","P","E","E"],"gridnums":[1,2,3,4,0,5,6,7,8,9,0,10,11,12,13,14,0,0,0,0,15,0,0,0,0,0,16,0,0,0,17,0,0,0,18,0,0,0,0,0,0,19,0,0,0,0,0,20,0,0,0,0,21,0,0,0,22,0,0,0,0,23,0,0,0,24,25,0,0,0,26,0,0,0,0,27,0,0,0,28,0,0,0,0,0,29,0,0,0,0,30,0,0,31,0,32,0,33,0,34,0,0,0,0,0,35,0,0,0,36,0,0,0,37,0,0,0,38,39,40,0,0,0,41,0,0,0,42,0,0,0,43,0,0,0,44,45,46,0,0,0,0,0,47,0,48,0,49,0,0,50,0,0,0,0,51,52,53,0,0,0,54,0,0,0,55,0,0,0,0,56,0,0,0,0,57,0,0,0,0,58,0,0,0,0,59,0,0,60,61,0,0,0,62,63,64,0,0,0,0,65,0,0,0,0,0,66,0,0,0,67,0,0,0,0,68,0,0,0,0,0,69,0,0,0],"hold":null,"id":null,"id2":null,"interpretcolors":null,"jnotes":null,"key":null,"mini":null,"notepad":null,"publisher":"The New York Times","rbars":null,"shadecircles":null,"size":{"cols":15,"rows":15},"title":"NY TIMES, WED, JAN 11, 2012","track":null,"type":null}
grid-pane-view.ts
import { Pane } from '../../model/pane'; import { GridRenderer, GridRendererData } from '../../renderers/grid-renderer'; import { IPaneRenderer } from '../../renderers/ipane-renderer'; import { IUpdatablePaneView } from './iupdatable-pane-view'; export class
implements IUpdatablePaneView { private readonly _pane: Pane; private readonly _renderer: GridRenderer = new GridRenderer(); private _invalidated: boolean = true; public constructor(pane: Pane) { this._pane = pane; } public update(): void { this._invalidated = true; } public renderer(height: number, width: number): IPaneRenderer | null { if (this._invalidated) { const gridOptions = this._pane.model().options().grid; const data: GridRendererData = { h: height, w: width, horzLinesVisible: gridOptions.horzLines.visible, vertLinesVisible: gridOptions.vertLines.visible, horzLinesColor: gridOptions.horzLines.color, vertLinesColor: gridOptions.vertLines.color, horzLineStyle: gridOptions.horzLines.style, vertLineStyle: gridOptions.vertLines.style, priceMarks: this._pane.defaultPriceScale().marks(), timeMarks: this._pane.model().timeScale().marks() || [], }; this._renderer.setData(data); this._invalidated = false; } return this._renderer; } }
GridPaneView
api_op_ListTerminologies.go
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package translate import ( "context" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/internal/awsutil" )
// The maximum number of custom terminologies returned per list request. MaxResults *int64 `min:"1" type:"integer"` // If the result of the request to ListTerminologies was truncated, include // the NextToken to fetch the next group of custom terminologies. NextToken *string `type:"string"` } // String returns the string representation func (s ListTerminologiesInput) String() string { return awsutil.Prettify(s) } // Validate inspects the fields of the type to determine if they are valid. func (s *ListTerminologiesInput) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "ListTerminologiesInput"} if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } type ListTerminologiesOutput struct { _ struct{} `type:"structure"` // If the response to the ListTerminologies was truncated, the NextToken fetches // the next group of custom terminologies. NextToken *string `type:"string"` // The properties list of the custom terminologies returned on the list request. TerminologyPropertiesList []TerminologyProperties `type:"list"` } // String returns the string representation func (s ListTerminologiesOutput) String() string { return awsutil.Prettify(s) } const opListTerminologies = "ListTerminologies" // ListTerminologiesRequest returns a request value for making API operation for // Amazon Translate. // // Provides a list of custom terminologies associated with your account. // // // Example sending a request using ListTerminologiesRequest. // req := client.ListTerminologiesRequest(params) // resp, err := req.Send(context.TODO()) // if err == nil { // fmt.Println(resp) // } // // Please also see https://docs.aws.amazon.com/goto/WebAPI/translate-2017-07-01/ListTerminologies func (c *Client) ListTerminologiesRequest(input *ListTerminologiesInput) ListTerminologiesRequest { op := &aws.Operation{ Name: opListTerminologies, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &ListTerminologiesInput{} } req := c.newRequest(op, input, &ListTerminologiesOutput{}) return ListTerminologiesRequest{Request: req, Input: input, Copy: c.ListTerminologiesRequest} } // ListTerminologiesRequest is the request type for the // ListTerminologies API operation. type ListTerminologiesRequest struct { *aws.Request Input *ListTerminologiesInput Copy func(*ListTerminologiesInput) ListTerminologiesRequest } // Send marshals and sends the ListTerminologies API request. func (r ListTerminologiesRequest) Send(ctx context.Context) (*ListTerminologiesResponse, error) { r.Request.SetContext(ctx) err := r.Request.Send() if err != nil { return nil, err } resp := &ListTerminologiesResponse{ ListTerminologiesOutput: r.Request.Data.(*ListTerminologiesOutput), response: &aws.Response{Request: r.Request}, } return resp, nil } // ListTerminologiesResponse is the response type for the // ListTerminologies API operation. type ListTerminologiesResponse struct { *ListTerminologiesOutput response *aws.Response } // SDKResponseMetdata returns the response metadata for the // ListTerminologies request. func (r *ListTerminologiesResponse) SDKResponseMetdata() *aws.Response { return r.response }
type ListTerminologiesInput struct { _ struct{} `type:"structure"`
ptvs_virtualenv_proxy.py
# ############################################################################ # # Copyright (c) Microsoft Corporation. # # This source code is subject to terms and conditions of the Apache License, Version 2.0. A # copy of the license can be found in the License.html file at the root of this distribution. If # you cannot locate the Apache License, Version 2.0, please send an email to # [email protected]. By using this source code in any fashion, you are agreeing to be bound
# # You must not remove this notice, or any other, from this software. # # ########################################################################### import datetime import os import sys if sys.version_info[0] == 3: def to_str(value): return value.decode(sys.getfilesystemencoding()) def execfile(path, global_dict): """Execute a file""" with open(path, 'r') as f: code = f.read() code = code.replace('\r\n', '\n') + '\n' exec(code, global_dict) else: def to_str(value): return value.encode(sys.getfilesystemencoding()) def log(txt): """Logs fatal errors to a log file if WSGI_LOG env var is defined""" log_file = os.environ.get('WSGI_LOG') if log_file: f = open(log_file, 'a+') try: f.write('%s: %s' % (datetime.datetime.now(), txt)) finally: f.close() ptvsd_secret = os.getenv('WSGI_PTVSD_SECRET') if ptvsd_secret: log('Enabling ptvsd ...\n') try: import ptvsd try: ptvsd.enable_attach(ptvsd_secret) log('ptvsd enabled.\n') except: log('ptvsd.enable_attach failed\n') except ImportError: log('error importing ptvsd.\n'); def get_wsgi_handler(handler_name): if not handler_name: raise Exception('WSGI_HANDLER env var must be set') if not isinstance(handler_name, str): handler_name = to_str(handler_name) module_name, _, callable_name = handler_name.rpartition('.') should_call = callable_name.endswith('()') callable_name = callable_name[:-2] if should_call else callable_name name_list = [(callable_name, should_call)] handler = None while module_name: try: handler = __import__(module_name, fromlist=[name_list[0][0]]) for name, should_call in name_list: handler = getattr(handler, name) if should_call: handler = handler() break except ImportError: module_name, _, callable_name = module_name.rpartition('.') should_call = callable_name.endswith('()') callable_name = callable_name[:-2] if should_call else callable_name name_list.insert(0, (callable_name, should_call)) handler = None if handler is None: raise ValueError('"%s" could not be imported' % handler_name) return handler activate_this = os.getenv('WSGI_ALT_VIRTUALENV_ACTIVATE_THIS') if not activate_this: raise Exception('WSGI_ALT_VIRTUALENV_ACTIVATE_THIS is not set') def get_virtualenv_handler(): log('Activating virtualenv with %s\n' % activate_this) execfile(activate_this, dict(__file__=activate_this)) log('Getting handler %s\n' % os.getenv('WSGI_ALT_VIRTUALENV_HANDLER')) handler = get_wsgi_handler(os.getenv('WSGI_ALT_VIRTUALENV_HANDLER')) log('Got handler: %r\n' % handler) return handler def get_venv_handler(): log('Activating venv with executable at %s\n' % activate_this) import site sys.executable = activate_this old_sys_path, sys.path = sys.path, [] site.main() sys.path.insert(0, '') for item in old_sys_path: if item not in sys.path: sys.path.append(item) log('Getting handler %s\n' % os.getenv('WSGI_ALT_VIRTUALENV_HANDLER')) handler = get_wsgi_handler(os.getenv('WSGI_ALT_VIRTUALENV_HANDLER')) log('Got handler: %r\n' % handler) return handler
# by the terms of the Apache License, Version 2.0.
database.py
import sqlite3 from flask import g, Flask from constants import Constants import json DATABASE = 'db/sqlite.db' app = Flask(Constants.APP_NAME) def get_db(): db = getattr(g, '_database', None) if db is None: db = g._database = sqlite3.connect(DATABASE)
return db @app.teardown_appcontext def close_connection(exception): db = getattr(g, '_database', None) if db is not None: db.close() def serialize_result_to_individual(res,idname="id"): return {idname: res[0], "parameters":json.loads(res[1]), "elo": res[2]} class Database: @staticmethod def incr_comparisons(): cursor = get_db().cursor() cursor.execute('UPDATE stats SET num_comparisons = %d WHERE 1 == 1' % (Database.num_comparisons() + 1)) get_db().commit() @staticmethod def reset_comparisons(): cursor = get_db().cursor() cursor.execute('UPDATE stats SET num_comparisons = 0 WHERE 1 == 1') get_db().commit() @staticmethod def num_comparisons(): db = get_db() cursor = db.cursor() cursor.execute('SELECT num_comparisons FROM stats;') return cursor.fetchone()[0] @staticmethod def current_generation_is_empty(): db = get_db() cursor = db.cursor() cursor.execute('SELECT * FROM current') return not cursor.fetchone() @staticmethod def add_individual_to_current_generation(parameters): string = json.dumps(parameters) cursor = get_db().cursor() cursor.execute('INSERT INTO current (parameters, elo) VALUES (?, 1000.0)', (string,)) get_db().commit() @staticmethod def get_individual_for_id(idd): db = get_db() cursor = db.cursor() cursor.execute('SELECT id, parameters, elo FROM current WHERE id = ?', (idd,)) return serialize_result_to_individual(cursor.fetchone()) @staticmethod def update_elo_for_id(idd, elo): db = get_db() cursor = db.cursor() cursor.execute('UPDATE current SET elo = ? WHERE id = ?', (elo, idd)) db.commit() @staticmethod def get_all_individuals_sorted(): db = get_db() cursor = db.cursor() cursor.execute('SELECT id, parameters, elo FROM current ORDER BY elo DESC') return [serialize_result_to_individual(res) for res in cursor.fetchall()] @staticmethod def get_random_individuals(num): db = get_db() cursor = db.cursor() cursor.execute('SELECT id, parameters, elo FROM current ORDER BY RANDOM() LIMIT ?', (num,)) return [serialize_result_to_individual(res) for res in cursor.fetchall()] @staticmethod def delete_individuals(individuals): cursor = get_db().cursor() id_list = ", ".join(map(lambda x: str(x["id"]), individuals)) cursor.execute('DELETE FROM current WHERE id IN (%s)' % id_list) get_db().commit() @staticmethod def get_historical_individuals(): db = get_db() cursor = db.cursor() cursor.execute('SELECT gen, parameters, elo FROM historical ORDER BY gen') return [serialize_result_to_individual(res,"gen") for res in cursor.fetchall()] @staticmethod def add_historical_individual(individual): string = json.dumps(individual['parameters']) elo = individual['elo'] cursor = get_db().cursor() cursor.execute('INSERT INTO historical (parameters, elo) VALUES (?, ?)', (string,elo)) get_db().commit() @staticmethod def record_decision(winner, loser): db = get_db() cursor = db.cursor() data = (winner["id"],json.dumps(winner["parameters"]),loser["id"],json.dumps(loser["parameters"])) cursor.execute('INSERT INTO decisions (winner_id, winner_parameters, loser_id, loser_parameters) VALUES (?, ?, ?, ?)', data)
browserify.js
var gulp = require('gulp'); var browserify = require('browserify'); var through2 = require('through2'); var rename = require('gulp-rename'); var config = require('../config').app; gulp.task('browserify', function () { return gulp.src([config.init]) .pipe(through2.obj(function (file, enc, next) { browserify({entries: file.path, standalone: "sparks"}) .bundle(function(err, res){ file.contents = res; next(null, file); }); }))
.pipe(rename('sparks.js')) .pipe(gulp.dest(config.dest)); });
nqueens.rs
/* The eight queens problem is the task of positioning eight chess queens on an 8x8 chessboard so that no two queens threaten each other. Consequently, no two queens may share the same row, column, or diagonal in a valid solution. The eight queens problem is the N = 8 instance of the more general N-queens problem of placing N non-attacking queens on an N x N board. */ const N : usize = 8; // Solutions exist for all natural numbers N with the exception of N = 2 and N = 3. fn
(mut board: &mut [[bool; N]; N], r: usize, mut cnt: &mut i64){ if r == N { *cnt += 1; println!("Answer {}\n", *cnt); for i in board.iter(){ println!("{}", i.iter().map(|&x| if x {"Q"} else{"."}.to_string()) .collect::<Vec<String>>() .join(" ") ) } println!(" "); return; } for i in 0..N { let mut det: bool = true; for j in 0..r { if board[j][i] || i + j >= r && board[j][i + j - r] || i + r < N + j && board[j][i + r - j] { det = false; } } if det { board[r][i] = true; t(&mut board, r + 1, &mut cnt); board[r][i] = false; } } } fn main() { let mut board: [[bool; N]; N] = [[false; N]; N]; let mut cnt: i64 = 0; t(&mut board, 0, &mut cnt); } /* Sample Output of the following code :- Answer 1 Q . . . . . . . . . . . Q . . . . . . . . . . Q . . . . . Q . . . . Q . . . . . . . . . . . Q . . Q . . . . . . . . . Q . . . . Answer 2 Q . . . . . . . . . . . . Q . . . . . . . . . Q . . Q . . . . . . . . . . . Q . . . . Q . . . . . Q . . . . . . . . . . Q . . . Answer 3 Q . . . . . . . . . . . . . Q . . . . Q . . . . . . . . . Q . . . . . . . . . Q . Q . . . . . . . . . . Q . . . . . Q . . . . . Answer 4 Q . . . . . . . . . . . . . Q . . . . . Q . . . . . . . . . . Q . Q . . . . . . . . . Q . . . . . . . . . Q . . . . Q . . . . . Answer 5 . Q . . . . . . . . . Q . . . . . . . . . Q . . . . . . . . . Q . . Q . . . . . Q . . . . . . . . . . . . . Q . . . . . Q . . . */
t
GMXToPython.py
import xml.etree.ElementTree as ET import os from Element import Element class GMXToPython(object): def __init__(self, xmlFile): self.gmxroot = ET.parse(xmlFile).getroot() self.root = Element(self.gmxroot) for child in self.gmxroot: self.process(child, self.root) def process(self, element, parent): elem = Element(element) elem.parent = parent parent.children.append(elem) elem.generation = parent.generation +1 elem.generateCleanText() if elem.parent == self.root: elem.primogen = elem.tag else:
elem.primogen = parent.primogen for child in element: self.process(child, elem)
test_attention.py
""" Here come the tests for attention types and their compatibility """ import unittest import torch from torch.autograd import Variable import onmt class TestAttention(unittest.TestCase): def
(self): source_lengths = torch.IntTensor([7, 3, 5, 2]) # illegal_weights_mask = torch.ByteTensor([ # [0, 0, 0, 0, 0, 0, 0], # [0, 0, 0, 1, 1, 1, 1], # [0, 0, 0, 0, 0, 1, 1], # [0, 0, 1, 1, 1, 1, 1]]) batch_size = source_lengths.size(0) dim = 20 memory_bank = Variable(torch.randn(batch_size, source_lengths.max(), dim)) hidden = Variable(torch.randn(batch_size, dim)) attn = onmt.modules.Attention(dim) _, alignments = attn(hidden, memory_bank, memory_lengths=source_lengths) # TODO: fix for pytorch 0.3 # illegal_weights = alignments.masked_select(illegal_weights_mask) # self.assertEqual(0.0, illegal_weights.data.sum())
test_masked_global_attention
ports.go
package list import ( "strconv" "github.com/loft-sh/devspace/cmd/flags" "github.com/loft-sh/devspace/pkg/util/factory" "github.com/loft-sh/devspace/pkg/util/log" "github.com/loft-sh/devspace/pkg/util/message" "github.com/pkg/errors" "github.com/spf13/cobra" ) type portsCmd struct { *flags.GlobalFlags } func newPortsCmd(f factory.Factory, globalFlags *flags.GlobalFlags) *cobra.Command { cmd := &portsCmd{GlobalFlags: globalFlags} portsCmd := &cobra.Command{ Use: "ports", Short: "Lists port forwarding configurations", Long: ` ####################################################### ############### devspace list ports ################### ####################################################### Lists the port forwarding configurations ####################################################### `, Args: cobra.NoArgs, RunE: func(cobraCmd *cobra.Command, args []string) error { return cmd.RunListPort(f, cobraCmd, args) }} return portsCmd } // RunListPort runs the list port command logic func (cmd *portsCmd) RunListPort(f factory.Factory, cobraCmd *cobra.Command, args []string) error { logger := f.GetLog() // Set config root configLoader := f.NewConfigLoader(cmd.ToConfigOptions(), logger) configExists, err := configLoader.SetDevSpaceRoot() if err != nil { return err } if !configExists { return errors.New(message.ConfigNotFound) } config, err := configLoader.Load() if err != nil { return err } if config.Dev.Ports == nil || len(config.Dev.Ports) == 0
headerColumnNames := []string{ "Image", "LabelSelector", "Ports (Local:Remote)", } portForwards := make([][]string, 0, len(config.Dev.Ports)) // Transform values into string arrays for _, value := range config.Dev.Ports { selector := "" for k, v := range value.LabelSelector { if len(selector) > 0 { selector += ", " } selector += k + "=" + v } portMappings := "" if value.PortMappings != nil { for _, v := range value.PortMappings { if len(portMappings) > 0 { portMappings += ", " } remotePort := *v.LocalPort if v.RemotePort != nil { remotePort = *v.RemotePort } portMappings += strconv.Itoa(*v.LocalPort) + ":" + strconv.Itoa(remotePort) } } portForwards = append(portForwards, []string{ value.ImageName, selector, portMappings, }) } log.PrintTable(logger, headerColumnNames, portForwards) return nil }
{ logger.Info("No ports are forwarded. Run `devspace add port` to add a port that should be forwarded\n") return nil }
0004_auto_20200618_0223.py
# Generated by Django 3.0.6 on 2020-06-18 01:23 from django.db import migrations, models class
(migrations.Migration): dependencies = [ ('RestAPIS', '0003_auto_20200618_0222'), ] operations = [ migrations.AlterField( model_name='verses_learned', name='state', field=models.IntegerField(blank=True, default=0, null=True), ), migrations.AlterField( model_name='verses_marked', name='state', field=models.IntegerField(blank=True, default=0, null=True), ), ]
Migration
app.go
package app import ( "fmt" "io" stdlog "log" "os" "path/filepath" "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/grpc/tmservice" "github.com/cosmos/cosmos-sdk/client/rpc" "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/server/api" "github.com/cosmos/cosmos-sdk/server/config" servertypes "github.com/cosmos/cosmos-sdk/server/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" "github.com/cosmos/cosmos-sdk/version" "github.com/cosmos/cosmos-sdk/x/auth" authante "github.com/cosmos/cosmos-sdk/x/auth/ante" authrest "github.com/cosmos/cosmos-sdk/x/auth/client/rest" authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" authtx "github.com/cosmos/cosmos-sdk/x/auth/tx" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" "github.com/cosmos/cosmos-sdk/x/auth/vesting" "github.com/cosmos/cosmos-sdk/x/bank" bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" "github.com/cosmos/cosmos-sdk/x/capability" capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" "github.com/cosmos/cosmos-sdk/x/crisis" crisiskeeper "github.com/cosmos/cosmos-sdk/x/crisis/keeper" crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types" distr "github.com/cosmos/cosmos-sdk/x/distribution" distrclient "github.com/cosmos/cosmos-sdk/x/distribution/client" distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper" distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types" "github.com/cosmos/cosmos-sdk/x/evidence" evidencekeeper "github.com/cosmos/cosmos-sdk/x/evidence/keeper" evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types" "github.com/cosmos/cosmos-sdk/x/genutil" genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types" "github.com/cosmos/cosmos-sdk/x/gov" govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" "github.com/cosmos/cosmos-sdk/x/mint" mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper" minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" "github.com/cosmos/cosmos-sdk/x/params" paramsclient "github.com/cosmos/cosmos-sdk/x/params/client" paramskeeper "github.com/cosmos/cosmos-sdk/x/params/keeper" paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" paramproposal "github.com/cosmos/cosmos-sdk/x/params/types/proposal" "github.com/cosmos/cosmos-sdk/x/slashing" slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper" slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" "github.com/cosmos/cosmos-sdk/x/staking" stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" "github.com/cosmos/cosmos-sdk/x/upgrade" upgradeclient "github.com/cosmos/cosmos-sdk/x/upgrade/client" upgradekeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper" upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" transfer "github.com/cosmos/ibc-go/modules/apps/transfer" ibctransferkeeper "github.com/cosmos/ibc-go/modules/apps/transfer/keeper" ibctransfertypes "github.com/cosmos/ibc-go/modules/apps/transfer/types" ibc "github.com/cosmos/ibc-go/modules/core" ibcclient "github.com/cosmos/ibc-go/modules/core/02-client" ibcclientclient "github.com/cosmos/ibc-go/modules/core/02-client/client" ibcclienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types" porttypes 
"github.com/cosmos/ibc-go/modules/core/05-port/types" ibchost "github.com/cosmos/ibc-go/modules/core/24-host" ibckeeper "github.com/cosmos/ibc-go/modules/core/keeper" abci "github.com/tendermint/tendermint/abci/types" tmjson "github.com/tendermint/tendermint/libs/json" tmlog "github.com/tendermint/tendermint/libs/log" dbm "github.com/tendermint/tm-db" "github.com/kava-labs/kava/app/ante" kavaparams "github.com/kava-labs/kava/app/params" "github.com/kava-labs/kava/x/auction" auctionkeeper "github.com/kava-labs/kava/x/auction/keeper" auctiontypes "github.com/kava-labs/kava/x/auction/types" "github.com/kava-labs/kava/x/bep3" bep3keeper "github.com/kava-labs/kava/x/bep3/keeper" bep3types "github.com/kava-labs/kava/x/bep3/types" "github.com/kava-labs/kava/x/cdp" cdpkeeper "github.com/kava-labs/kava/x/cdp/keeper" cdptypes "github.com/kava-labs/kava/x/cdp/types" "github.com/kava-labs/kava/x/committee" committeeclient "github.com/kava-labs/kava/x/committee/client" committeekeeper "github.com/kava-labs/kava/x/committee/keeper" committeetypes "github.com/kava-labs/kava/x/committee/types" "github.com/kava-labs/kava/x/hard" hardkeeper "github.com/kava-labs/kava/x/hard/keeper" hardtypes "github.com/kava-labs/kava/x/hard/types" "github.com/kava-labs/kava/x/incentive" incentivekeeper "github.com/kava-labs/kava/x/incentive/keeper" incentivetypes "github.com/kava-labs/kava/x/incentive/types" issuance "github.com/kava-labs/kava/x/issuance" issuancekeeper "github.com/kava-labs/kava/x/issuance/keeper" issuancetypes "github.com/kava-labs/kava/x/issuance/types" "github.com/kava-labs/kava/x/kavadist" kavadistclient "github.com/kava-labs/kava/x/kavadist/client" kavadistkeeper "github.com/kava-labs/kava/x/kavadist/keeper" kavadisttypes "github.com/kava-labs/kava/x/kavadist/types" pricefeed "github.com/kava-labs/kava/x/pricefeed" pricefeedkeeper "github.com/kava-labs/kava/x/pricefeed/keeper" pricefeedtypes "github.com/kava-labs/kava/x/pricefeed/types" "github.com/kava-labs/kava/x/swap" swapkeeper "github.com/kava-labs/kava/x/swap/keeper" swaptypes "github.com/kava-labs/kava/x/swap/types" validatorvesting "github.com/kava-labs/kava/x/validator-vesting" ) const ( appName = "kava" upgradeName = "v44" ) var ( // DefaultNodeHome default home directories for the application daemon DefaultNodeHome string // ModuleBasics manages simple versions of full app modules. // It's used for things such as codec registration and genesis file verification. ModuleBasics = module.NewBasicManager( genutil.AppModuleBasic{}, auth.AppModuleBasic{}, bank.AppModuleBasic{}, capability.AppModuleBasic{}, staking.AppModuleBasic{}, mint.AppModuleBasic{}, distr.AppModuleBasic{}, gov.NewAppModuleBasic( paramsclient.ProposalHandler, distrclient.ProposalHandler, upgradeclient.ProposalHandler, upgradeclient.CancelProposalHandler, ibcclientclient.UpdateClientProposalHandler, ibcclientclient.UpgradeProposalHandler, kavadistclient.ProposalHandler, committeeclient.ProposalHandler, ), params.AppModuleBasic{}, crisis.AppModuleBasic{}, slashing.AppModuleBasic{}, ibc.AppModuleBasic{}, upgrade.AppModuleBasic{}, evidence.AppModuleBasic{}, transfer.AppModuleBasic{}, vesting.AppModuleBasic{}, kavadist.AppModuleBasic{}, auction.AppModuleBasic{}, issuance.AppModuleBasic{}, bep3.AppModuleBasic{}, pricefeed.AppModuleBasic{},
swap.AppModuleBasic{}, cdp.AppModuleBasic{}, hard.AppModuleBasic{}, committee.AppModuleBasic{}, incentive.AppModuleBasic{}, validatorvesting.AppModuleBasic{}, ) // module account permissions // If these are changed, the permissions stored in accounts // must also be migrated during a chain upgrade. mAccPerms = map[string][]string{ authtypes.FeeCollectorName: nil, distrtypes.ModuleName: nil, minttypes.ModuleName: {authtypes.Minter}, stakingtypes.BondedPoolName: {authtypes.Burner, authtypes.Staking}, stakingtypes.NotBondedPoolName: {authtypes.Burner, authtypes.Staking}, govtypes.ModuleName: {authtypes.Burner}, ibctransfertypes.ModuleName: {authtypes.Minter, authtypes.Burner}, kavadisttypes.KavaDistMacc: {authtypes.Minter}, auctiontypes.ModuleName: nil, issuancetypes.ModuleAccountName: {authtypes.Minter, authtypes.Burner}, bep3types.ModuleName: {authtypes.Burner, authtypes.Minter}, swaptypes.ModuleName: nil, cdptypes.ModuleName: {authtypes.Minter, authtypes.Burner}, cdptypes.LiquidatorMacc: {authtypes.Minter, authtypes.Burner}, hardtypes.ModuleAccountName: {authtypes.Minter}, } ) // Verify app interface at compile time // var _ simapp.App = (*App)(nil) // TODO var _ servertypes.Application = (*App)(nil) // Options bundles several configuration params for an App. // The zero value can be used as a sensible default. type Options struct { SkipLoadLatest bool SkipUpgradeHeights map[int64]bool SkipGenesisInvariants bool InvariantCheckPeriod uint MempoolEnableAuth bool MempoolAuthAddresses []sdk.AccAddress } // App is the Kava ABCI application. type App struct { *baseapp.BaseApp // codec legacyAmino *codec.LegacyAmino appCodec codec.Codec interfaceRegistry types.InterfaceRegistry // keys to access the substores keys map[string]*sdk.KVStoreKey tkeys map[string]*sdk.TransientStoreKey memKeys map[string]*sdk.MemoryStoreKey // keepers from all the modules accountKeeper authkeeper.AccountKeeper bankKeeper bankkeeper.Keeper capabilityKeeper *capabilitykeeper.Keeper stakingKeeper stakingkeeper.Keeper mintKeeper mintkeeper.Keeper distrKeeper distrkeeper.Keeper govKeeper govkeeper.Keeper paramsKeeper paramskeeper.Keeper crisisKeeper crisiskeeper.Keeper slashingKeeper slashingkeeper.Keeper ibcKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly upgradeKeeper upgradekeeper.Keeper evidenceKeeper evidencekeeper.Keeper transferKeeper ibctransferkeeper.Keeper kavadistKeeper kavadistkeeper.Keeper auctionKeeper auctionkeeper.Keeper issuanceKeeper issuancekeeper.Keeper bep3Keeper bep3keeper.Keeper pricefeedKeeper pricefeedkeeper.Keeper swapKeeper swapkeeper.Keeper cdpKeeper cdpkeeper.Keeper hardKeeper hardkeeper.Keeper committeeKeeper committeekeeper.Keeper incentiveKeeper incentivekeeper.Keeper // make scoped keepers public for test purposes ScopedIBCKeeper capabilitykeeper.ScopedKeeper ScopedTransferKeeper capabilitykeeper.ScopedKeeper // the module manager mm *module.Manager // simulation manager sm *module.SimulationManager // configurator configurator module.Configurator } func init() { userHomeDir, err := os.UserHomeDir() if err != nil { stdlog.Printf("Failed to get home dir %v", err) } DefaultNodeHome = filepath.Join(userHomeDir, ".kava") } // NewApp returns a reference to an initialized App. 
func NewApp( logger tmlog.Logger, db dbm.DB, homePath string, traceStore io.Writer, encodingConfig kavaparams.EncodingConfig, options Options, baseAppOptions ...func(*baseapp.BaseApp), ) *App { appCodec := encodingConfig.Marshaler legacyAmino := encodingConfig.Amino interfaceRegistry := encodingConfig.InterfaceRegistry bApp := baseapp.NewBaseApp(appName, logger, db, encodingConfig.TxConfig.TxDecoder(), baseAppOptions...) bApp.SetCommitMultiStoreTracer(traceStore) bApp.SetVersion(version.Version) bApp.SetInterfaceRegistry(interfaceRegistry) keys := sdk.NewKVStoreKeys( authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey, minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey, govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey, upgradetypes.StoreKey, evidencetypes.StoreKey, ibctransfertypes.StoreKey, capabilitytypes.StoreKey, kavadisttypes.StoreKey, auctiontypes.StoreKey, issuancetypes.StoreKey, bep3types.StoreKey, pricefeedtypes.StoreKey, swaptypes.StoreKey, cdptypes.StoreKey, hardtypes.StoreKey, committeetypes.StoreKey, incentivetypes.StoreKey, ) tkeys := sdk.NewTransientStoreKeys(paramstypes.TStoreKey) memKeys := sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey) var app = &App{ BaseApp: bApp, legacyAmino: legacyAmino, appCodec: appCodec, interfaceRegistry: interfaceRegistry, keys: keys, tkeys: tkeys, memKeys: memKeys, } // init params keeper and subspaces app.paramsKeeper = paramskeeper.NewKeeper( appCodec, legacyAmino, keys[paramstypes.StoreKey], tkeys[paramstypes.TStoreKey], ) authSubspace := app.paramsKeeper.Subspace(authtypes.ModuleName) bankSubspace := app.paramsKeeper.Subspace(banktypes.ModuleName) stakingSubspace := app.paramsKeeper.Subspace(stakingtypes.ModuleName) mintSubspace := app.paramsKeeper.Subspace(minttypes.ModuleName) distrSubspace := app.paramsKeeper.Subspace(distrtypes.ModuleName) slashingSubspace := app.paramsKeeper.Subspace(slashingtypes.ModuleName) govSubspace := app.paramsKeeper.Subspace(govtypes.ModuleName).WithKeyTable(govtypes.ParamKeyTable()) crisisSubspace := app.paramsKeeper.Subspace(crisistypes.ModuleName) kavadistSubspace := app.paramsKeeper.Subspace(kavadisttypes.ModuleName) auctionSubspace := app.paramsKeeper.Subspace(auctiontypes.ModuleName) issuanceSubspace := app.paramsKeeper.Subspace(issuancetypes.ModuleName) bep3Subspace := app.paramsKeeper.Subspace(bep3types.ModuleName) pricefeedSubspace := app.paramsKeeper.Subspace(pricefeedtypes.ModuleName) swapSubspace := app.paramsKeeper.Subspace(swaptypes.ModuleName) cdpSubspace := app.paramsKeeper.Subspace(cdptypes.ModuleName) hardSubspace := app.paramsKeeper.Subspace(hardtypes.ModuleName) incentiveSubspace := app.paramsKeeper.Subspace(incentivetypes.ModuleName) ibcSubspace := app.paramsKeeper.Subspace(ibchost.ModuleName) ibctransferSubspace := app.paramsKeeper.Subspace(ibctransfertypes.ModuleName) bApp.SetParamStore( app.paramsKeeper.Subspace(baseapp.Paramspace).WithKeyTable(paramskeeper.ConsensusParamsKeyTable()), ) app.capabilityKeeper = capabilitykeeper.NewKeeper(appCodec, keys[capabilitytypes.StoreKey], memKeys[capabilitytypes.MemStoreKey]) scopedIBCKeeper := app.capabilityKeeper.ScopeToModule(ibchost.ModuleName) scopedTransferKeeper := app.capabilityKeeper.ScopeToModule(ibctransfertypes.ModuleName) app.capabilityKeeper.Seal() // add keepers app.accountKeeper = authkeeper.NewAccountKeeper( appCodec, keys[authtypes.StoreKey], authSubspace, authtypes.ProtoBaseAccount, mAccPerms, ) app.bankKeeper = bankkeeper.NewBaseKeeper( appCodec, keys[banktypes.StoreKey], app.accountKeeper, 
bankSubspace, app.loadBlockedMaccAddrs(), ) app.stakingKeeper = stakingkeeper.NewKeeper( appCodec, keys[stakingtypes.StoreKey], app.accountKeeper, app.bankKeeper, stakingSubspace, ) app.mintKeeper = mintkeeper.NewKeeper( appCodec, keys[minttypes.StoreKey], mintSubspace, &app.stakingKeeper, app.accountKeeper, app.bankKeeper, authtypes.FeeCollectorName, ) app.distrKeeper = distrkeeper.NewKeeper( appCodec, keys[distrtypes.StoreKey], distrSubspace, app.accountKeeper, app.bankKeeper, &app.stakingKeeper, authtypes.FeeCollectorName, app.ModuleAccountAddrs(), ) app.slashingKeeper = slashingkeeper.NewKeeper( appCodec, keys[slashingtypes.StoreKey], &app.stakingKeeper, slashingSubspace, ) app.crisisKeeper = crisiskeeper.NewKeeper( crisisSubspace, options.InvariantCheckPeriod, app.bankKeeper, authtypes.FeeCollectorName, ) app.upgradeKeeper = upgradekeeper.NewKeeper( options.SkipUpgradeHeights, keys[upgradetypes.StoreKey], appCodec, homePath, app.BaseApp, ) app.evidenceKeeper = *evidencekeeper.NewKeeper( appCodec, keys[evidencetypes.StoreKey], &app.stakingKeeper, app.slashingKeeper, ) app.ibcKeeper = ibckeeper.NewKeeper( appCodec, keys[ibchost.StoreKey], ibcSubspace, app.stakingKeeper, app.upgradeKeeper, scopedIBCKeeper, ) app.kavadistKeeper = kavadistkeeper.NewKeeper( appCodec, keys[kavadisttypes.StoreKey], kavadistSubspace, app.bankKeeper, app.accountKeeper, app.distrKeeper, app.ModuleAccountAddrs(), ) govRouter := govtypes.NewRouter() govRouter. AddRoute(govtypes.RouterKey, govtypes.ProposalHandler). AddRoute(paramproposal.RouterKey, params.NewParamChangeProposalHandler(app.paramsKeeper)). AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(app.upgradeKeeper)). AddRoute(ibcclienttypes.RouterKey, ibcclient.NewClientProposalHandler(app.ibcKeeper.ClientKeeper)). AddRoute(distrtypes.RouterKey, distr.NewCommunityPoolSpendProposalHandler(app.distrKeeper)).AddRoute(kavadisttypes.RouterKey, kavadist.NewCommunityPoolMultiSpendProposalHandler(app.kavadistKeeper)). 
AddRoute(committeetypes.RouterKey, committee.NewProposalHandler(app.committeeKeeper)) app.govKeeper = govkeeper.NewKeeper( appCodec, keys[govtypes.StoreKey], govSubspace, app.accountKeeper, app.bankKeeper, &app.stakingKeeper, govRouter, ) app.transferKeeper = ibctransferkeeper.NewKeeper( appCodec, keys[ibctransfertypes.StoreKey], ibctransferSubspace, app.ibcKeeper.ChannelKeeper, &app.ibcKeeper.PortKeeper, app.accountKeeper, app.bankKeeper, scopedTransferKeeper, ) transferModule := transfer.NewAppModule(app.transferKeeper) // Create static IBC router, add transfer route, then set and seal it ibcRouter := porttypes.NewRouter() ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferModule) app.ibcKeeper.SetRouter(ibcRouter) app.auctionKeeper = auctionkeeper.NewKeeper( appCodec, keys[auctiontypes.StoreKey], auctionSubspace, app.bankKeeper, app.accountKeeper, ) app.issuanceKeeper = issuancekeeper.NewKeeper( appCodec, keys[issuancetypes.StoreKey], issuanceSubspace, app.accountKeeper, app.bankKeeper, ) app.bep3Keeper = bep3keeper.NewKeeper( appCodec, keys[bep3types.StoreKey], app.bankKeeper, app.accountKeeper, bep3Subspace, app.ModuleAccountAddrs(), ) app.pricefeedKeeper = pricefeedkeeper.NewKeeper( appCodec, keys[pricefeedtypes.StoreKey], pricefeedSubspace, ) swapKeeper := swapkeeper.NewKeeper( appCodec, keys[swaptypes.StoreKey], swapSubspace, app.accountKeeper, app.bankKeeper, ) cdpKeeper := cdpkeeper.NewKeeper( appCodec, keys[cdptypes.StoreKey], cdpSubspace, app.pricefeedKeeper, app.auctionKeeper, app.bankKeeper, app.accountKeeper, mAccPerms, ) hardKeeper := hardkeeper.NewKeeper( appCodec, keys[hardtypes.StoreKey], hardSubspace, app.accountKeeper, app.bankKeeper, app.pricefeedKeeper, app.auctionKeeper, ) app.incentiveKeeper = incentivekeeper.NewKeeper( appCodec, keys[incentivetypes.StoreKey], incentiveSubspace, app.bankKeeper, &cdpKeeper, &hardKeeper, app.accountKeeper, app.stakingKeeper, &swapKeeper, ) // create committee keeper with router committeeGovRouter := govtypes.NewRouter() committeeGovRouter. AddRoute(govtypes.RouterKey, govtypes.ProposalHandler). AddRoute(paramproposal.RouterKey, params.NewParamChangeProposalHandler(app.paramsKeeper)). AddRoute(distrtypes.RouterKey, distr.NewCommunityPoolSpendProposalHandler(app.distrKeeper)). AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(app.upgradeKeeper)) // Note: the committee proposal handler is not registered on the committee router. This means committees cannot create or update other committees. // Adding the committee proposal handler to the router is possible but awkward as the handler depends on the keeper which depends on the handler. app.committeeKeeper = committeekeeper.NewKeeper( appCodec, keys[committeetypes.StoreKey], committeeGovRouter, app.paramsKeeper, app.accountKeeper, app.bankKeeper, ) // register the staking hooks // NOTE: These keepers are passed by reference above, so they will contain these hooks. app.stakingKeeper = *(app.stakingKeeper.SetHooks( stakingtypes.NewMultiStakingHooks(app.distrKeeper.Hooks(), app.slashingKeeper.Hooks(), app.incentiveKeeper.Hooks()))) app.swapKeeper = *swapKeeper.SetHooks(app.incentiveKeeper.Hooks()) app.cdpKeeper = *cdpKeeper.SetHooks(cdptypes.NewMultiCDPHooks(app.incentiveKeeper.Hooks())) app.hardKeeper = *hardKeeper.SetHooks(hardtypes.NewMultiHARDHooks(app.incentiveKeeper.Hooks())) // create the module manager (Note: Any module instantiated in the module manager that is later modified // must be passed by reference here.) 
app.mm = module.NewManager( genutil.NewAppModule(app.accountKeeper, app.stakingKeeper, app.BaseApp.DeliverTx, encodingConfig.TxConfig), auth.NewAppModule(appCodec, app.accountKeeper, nil), bank.NewAppModule(appCodec, app.bankKeeper, app.accountKeeper), capability.NewAppModule(appCodec, *app.capabilityKeeper), staking.NewAppModule(appCodec, app.stakingKeeper, app.accountKeeper, app.bankKeeper), mint.NewAppModule(appCodec, app.mintKeeper, app.accountKeeper), distr.NewAppModule(appCodec, app.distrKeeper, app.accountKeeper, app.bankKeeper, app.stakingKeeper), gov.NewAppModule(appCodec, app.govKeeper, app.accountKeeper, app.bankKeeper), params.NewAppModule(app.paramsKeeper), crisis.NewAppModule(&app.crisisKeeper, options.SkipGenesisInvariants), slashing.NewAppModule(appCodec, app.slashingKeeper, app.accountKeeper, app.bankKeeper, app.stakingKeeper), ibc.NewAppModule(app.ibcKeeper), upgrade.NewAppModule(app.upgradeKeeper), evidence.NewAppModule(app.evidenceKeeper), transferModule, vesting.NewAppModule(app.accountKeeper, app.bankKeeper), params.NewAppModule(app.paramsKeeper), kavadist.NewAppModule(app.kavadistKeeper, app.accountKeeper), auction.NewAppModule(app.auctionKeeper, app.accountKeeper, app.bankKeeper), issuance.NewAppModule(app.issuanceKeeper, app.accountKeeper, app.bankKeeper), bep3.NewAppModule(app.bep3Keeper, app.accountKeeper, app.bankKeeper), pricefeed.NewAppModule(app.pricefeedKeeper, app.accountKeeper), validatorvesting.NewAppModule(app.bankKeeper), swap.NewAppModule(app.swapKeeper, app.accountKeeper), cdp.NewAppModule(app.cdpKeeper, app.accountKeeper, app.pricefeedKeeper, app.bankKeeper), hard.NewAppModule(app.hardKeeper, app.accountKeeper, app.bankKeeper, app.pricefeedKeeper), committee.NewAppModule(app.committeeKeeper, app.accountKeeper), incentive.NewAppModule(app.incentiveKeeper, app.accountKeeper, app.bankKeeper, app.cdpKeeper), ) // Warning: Some begin blockers must run before others. Ensure the dependencies are understood before modifying this list. app.mm.SetOrderBeginBlockers( // Upgrade begin blocker runs migrations on the first block after an upgrade. It should run before any other module. upgradetypes.ModuleName, // Capability begin blocker runs non state changing initialization. capabilitytypes.ModuleName, // Committee begin blocker changes module params by enacting proposals. // Run before to ensure params are updated together before state changes. committeetypes.ModuleName, minttypes.ModuleName, distrtypes.ModuleName, // During begin block slashing happens after distr.BeginBlocker so that // there is nothing left over in the validator fee pool, so as to keep the // CanWithdrawInvariant invariant. slashingtypes.ModuleName, evidencetypes.ModuleName, stakingtypes.ModuleName, kavadisttypes.ModuleName, // Auction begin blocker will close out expired auctions and pay debt back to cdp. // It should be run before cdp begin blocker which cancels out debt with stable and starts more auctions. auctiontypes.ModuleName, cdptypes.ModuleName, bep3types.ModuleName, hardtypes.ModuleName, issuancetypes.ModuleName, incentivetypes.ModuleName, ibchost.ModuleName, ) // Warning: Some end blockers must run before others. Ensure the dependencies are understood before modifying this list. app.mm.SetOrderEndBlockers( crisistypes.ModuleName, govtypes.ModuleName, stakingtypes.ModuleName, pricefeedtypes.ModuleName, ) // Warning: Some init genesis methods must run before others. 
Ensure the dependencies are understood before modifying this list app.mm.SetOrderInitGenesis( capabilitytypes.ModuleName, // initialize capabilities, run before any module creating or claiming capabilities in InitGenesis authtypes.ModuleName, // loads all accounts, run before any module with a module account banktypes.ModuleName, distrtypes.ModuleName, stakingtypes.ModuleName, slashingtypes.ModuleName, // iterates over validators, run after staking govtypes.ModuleName, minttypes.ModuleName, ibchost.ModuleName, evidencetypes.ModuleName, ibctransfertypes.ModuleName, kavadisttypes.ModuleName, auctiontypes.ModuleName, issuancetypes.ModuleName, bep3types.ModuleName, pricefeedtypes.ModuleName, swaptypes.ModuleName, cdptypes.ModuleName, // reads market prices, so must run after pricefeed genesis hardtypes.ModuleName, incentivetypes.ModuleName, // reads cdp params, so must run after cdp genesis committeetypes.ModuleName, genutiltypes.ModuleName, // runs arbitrary txs included in genisis state, so run after modules have been initialized crisistypes.ModuleName, // runs the invariants at genesis, should run after other modules ) app.mm.RegisterInvariants(&app.crisisKeeper) app.mm.RegisterRoutes(app.Router(), app.QueryRouter(), encodingConfig.Amino) app.configurator = module.NewConfigurator(app.appCodec, app.MsgServiceRouter(), app.GRPCQueryRouter()) app.mm.RegisterServices(app.configurator) // create the simulation manager and define the order of the modules for deterministic simulations // // NOTE: This is not required for apps that don't use the simulator for fuzz testing // transactions. // TODO // app.sm = module.NewSimulationManager( // auth.NewAppModule(app.accountKeeper), // bank.NewAppModule(app.bankKeeper, app.accountKeeper), // gov.NewAppModule(app.govKeeper, app.accountKeeper, app.accountKeeper, app.bankKeeper), // mint.NewAppModule(app.mintKeeper), // distr.NewAppModule(app.distrKeeper, app.accountKeeper, app.accountKeeper, app.bankKeeper, app.stakingKeeper), // staking.NewAppModule(app.stakingKeeper, app.accountKeeper, app.accountKeeper, app.bankKeeper), // slashing.NewAppModule(app.slashingKeeper, app.accountKeeper, app.stakingKeeper), // ) // app.sm.RegisterStoreDecoders() // initialize stores app.MountKVStores(keys) app.MountTransientStores(tkeys) app.MountMemoryStores(memKeys) // initialize the app var fetchers []ante.AddressFetcher if options.MempoolEnableAuth { fetchers = append(fetchers, func(sdk.Context) []sdk.AccAddress { return options.MempoolAuthAddresses }, app.bep3Keeper.GetAuthorizedAddresses, app.pricefeedKeeper.GetAuthorizedAddresses, ) } antehandler, err := ante.NewAnteHandler( app.accountKeeper, app.bankKeeper, nil, app.ibcKeeper.ChannelKeeper, encodingConfig.TxConfig.SignModeHandler(), authante.DefaultSigVerificationGasConsumer, fetchers..., ) if err != nil { panic(fmt.Sprintf("failed to create antehandler: %s", err)) } app.SetAnteHandler(antehandler) app.SetInitChainer(app.InitChainer) app.SetBeginBlocker(app.BeginBlocker) app.SetEndBlocker(app.EndBlocker) // load store if !options.SkipLoadLatest { if err := app.LoadLatestVersion(); err != nil { panic(fmt.Sprintf("failed to load latest version: %s", err)) } } app.ScopedIBCKeeper = scopedIBCKeeper app.ScopedTransferKeeper = scopedTransferKeeper return app } // BeginBlocker contains app specific logic for the BeginBlock abci call. 
func (app *App) BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock) abci.ResponseBeginBlock { return app.mm.BeginBlock(ctx, req) } // EndBlocker contains app specific logic for the EndBlock abci call. func (app *App) EndBlocker(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock { return app.mm.EndBlock(ctx, req) } // InitChainer contains app specific logic for the InitChain abci call. func (app *App) InitChainer(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain { var genesisState GenesisState if err := tmjson.Unmarshal(req.AppStateBytes, &genesisState); err != nil { panic(err) } app.upgradeKeeper.SetModuleVersionMap(ctx, app.mm.GetVersionMap()) return app.mm.InitGenesis(ctx, app.appCodec, genesisState) } // LoadHeight loads the app state for a particular height. func (app *App) LoadHeight(height int64) error { return app.LoadVersion(height) } // ModuleAccountAddrs returns all the app's module account addresses. func (app *App) ModuleAccountAddrs() map[string]bool { modAccAddrs := make(map[string]bool) for acc := range mAccPerms { modAccAddrs[authtypes.NewModuleAddress(acc).String()] = true } return modAccAddrs } // InterfaceRegistry returns the app's InterfaceRegistry. func (app *App) InterfaceRegistry() types.InterfaceRegistry { return app.interfaceRegistry } // SimulationManager implements the SimulationApp interface. func (app *App) SimulationManager() *module.SimulationManager { return app.sm } // RegisterAPIRoutes registers all application module routes with the provided API server. func (app *App) RegisterAPIRoutes(apiSvr *api.Server, apiConfig config.APIConfig) { clientCtx := apiSvr.ClientCtx // Register legacy REST routes rpc.RegisterRoutes(clientCtx, apiSvr.Router) authrest.RegisterTxRoutes(clientCtx, apiSvr.Router) ModuleBasics.RegisterRESTRoutes(clientCtx, apiSvr.Router) RegisterLegacyTxRoutes(clientCtx, apiSvr.Router) // Register GRPC Gateway routes tmservice.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter) authtx.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter) ModuleBasics.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter) // Swagger API configuration is ignored } // RegisterTxService implements the Application.RegisterTxService method. // It registers transaction related endpoints on the app's grpc server. func (app *App) RegisterTxService(clientCtx client.Context) { authtx.RegisterTxService(app.BaseApp.GRPCQueryRouter(), clientCtx, app.BaseApp.Simulate, app.interfaceRegistry) } // RegisterTendermintService implements the Application.RegisterTendermintService method. // It registers the standard tendermint grpc endpoints on the app's grpc server. func (app *App) RegisterTendermintService(clientCtx client.Context) { tmservice.RegisterTendermintService(app.BaseApp.GRPCQueryRouter(), clientCtx, app.interfaceRegistry) } // loadBlockedMaccAddrs returns a map indicating the blocked status of each module account address func (app *App) loadBlockedMaccAddrs() map[string]bool { modAccAddrs := app.ModuleAccountAddrs() kavadistMaccAddr := app.accountKeeper.GetModuleAddress(kavadisttypes.ModuleName) for addr := range modAccAddrs { // Set the kavadist module account address as unblocked if addr == kavadistMaccAddr.String() { modAccAddrs[addr] = false } } return modAccAddrs } // GetMaccPerms returns a mapping of the application's module account permissions. func GetMaccPerms() map[string][]string { perms := make(map[string][]string) for k, v := range mAccPerms { perms[k] = v } return perms }
hg.js
(window.webpackJsonpFileIcons=window.webpackJsonpFileIcons||[]).push([[424],{226:function(q){q.exports={viewBox:"0 0 1024 1024",font:"file-icons",code:"263f",ref:"hg",path:"M209 473q0-60-52.5-90.5T52 382Q0 412 0 472.5T52 563q52 30 104.5 0t52.5-90m-115 4q2-26 32-40t45 10q12 19-5 39.5T131 512q-20 5-29-2t-8-33m18 73q3-3 16.5-4t27.5-9q14-7 28-23.5t18-40.5q1-14 2-12.5t2 9.5q-3 39-31 60.5T126 555q-5 1-12 0t-2-5M513 19q-138 17-208 89-71 71-76 156t54 165q59 80 181 115 105 29 127 75t7.5 98Q584 769 555 822t-27 97q3 45 44 71t103.5 16Q738 996 814 941q75-55 145-171 69-115 65-253-4-139-66.5-256T780 72Q666 0 513 19m60 538q-10-6-35.5-19T464 509q-40-13-62-24t-32-20q31 14 58.5 21t58.5 15q66 20 87 31.5t23 36.5q-1 14-7.5 4.5T573 557m163 178q30 4 60-22 31-26 56.5-66t44.5-87q19-47 26-85 14-75 19-25.5T933 550q-8 30-25 68-18 37-39 73.5T825 761q-22 32-41 54-30 34-63 50t-73 21q-41 8-44-16t16.5-55q19.5-31 51.5-57 33-27 64-23m137 121q-38 45-91.5 80.5T665 976q-16 3-27 0t-10-13q1-15 16.5-12t39.5-3q87-21 135.5-67.5T920 752q41-64 60-124t20-73q2 8-4 38t-21 72.5Q960 708 935 758q-26 50-62 98M207 672q-40 23-60 60-20 38-20 78t20 78q20 37 60 60 39 23 82 21 42-1 77-21t58-56q22-36 22-82t-22-82q-23-36-58-56t-77-22q-43-1-82 22m100 165q36-12 55.5-60.5T386 763q9 70-24 95t-48 29q-53 14-57-9t50-41m124-8q-7 48-45.5 80.5T308 944q-8 0-15-2.5t3-4.5q12-2 62.5-24t66.5-78q5-18 6.5-17t-.5 11z"}}}]);
mmsystem.rs
// Copyright © 2015-2017 winapi-rs developers // Licensed under the Apache License, Version 2.0 // <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. // All files in the project carrying such notice may not be copied, modified, or distributed // except according to those terms. //! MM procedure declarations, constant definitions and macros use shared::basetsd::DWORD_PTR; use shared::minwindef::{BYTE, DWORD, UINT, WORD}; use shared::mmreg::WAVEFORMATEX; use um::winnt::{LPSTR, WCHAR}; //109 (Win 7 SDK) pub type MMVERSION = UINT; pub type MMRESULT = UINT; STRUCT!{struct MMTIME_smpte { hour: BYTE, min: BYTE, sec: BYTE, frame: BYTE, fps: BYTE, dummy: BYTE, pad: [BYTE; 2], }} STRUCT!{struct MMTIME_midi { songptrpos: DWORD, }} UNION2!{union MMTIME_u { [u8; 8], ms ms_mut: DWORD, sample sample_mut: DWORD, cb cb_mut: DWORD, ticks ticks_mut: DWORD, smpte smpte_mut: MMTIME_smpte, midi midi_mut: MMTIME_midi, }} STRUCT!{struct MMTIME { wType: UINT, u: MMTIME_u, }} pub type PMMTIME = *mut MMTIME; pub type NPMMTIME = *mut MMTIME; pub type LPMMTIME = *mut MMTIME; pub const TIME_MS: UINT = 0x0001; pub const TIME_SAMPLES: UINT = 0x0002; pub const TIME_BYTES: UINT = 0x0004; pub const TIME_SMPTE: UINT = 0x0008; pub const TIME_MIDI: UINT = 0x0010; pub const TIME_TICKS: UINT = 0x0020; pub const MM_JOY1MOVE: UINT = 0x3A0; pub const MM_JOY2MOVE: UINT = 0x3A1; pub const MM_JOY1ZMOVE: UINT = 0x3A2; pub const MM_JOY2ZMOVE: UINT = 0x3A3; pub const MM_JOY1BUTTONDOWN: UINT = 0x3B5; pub const MM_JOY2BUTTONDOWN: UINT = 0x3B6; pub const MM_JOY1BUTTONUP: UINT = 0x3B7; pub const MM_JOY2BUTTONUP: UINT = 0x3B8; pub const MM_MCINOTIFY: UINT = 0x3B9; pub const MM_WOM_OPEN: UINT = 0x3BB; pub const MM_WOM_CLOSE: UINT = 0x3BC; pub const MM_WOM_DONE: UINT = 0x3BD; pub const MM_WIM_OPEN: UINT = 0x3BE; pub const MM_WIM_CLOSE: UINT = 0x3BF; pub const MM_WIM_DATA: UINT = 0x3C0; pub const MM_MIM_OPEN: UINT = 0x3C1; pub const MM_MIM_CLOSE: UINT = 0x3C2; pub const MM_MIM_DATA: UINT = 0x3C3; pub const MM_MIM_LONGDATA: UINT = 0x3C4; pub const MM_MIM_ERROR: UINT = 0x3C5; pub const MM_MIM_LONGERROR: UINT = 0x3C6; pub const MM_MOM_OPEN: UINT = 0x3C7; pub const MM_MOM_CLOSE: UINT = 0x3C8; pub const MM_MOM_DONE: UINT = 0x3C9; pub const MMSYSERR_BASE: MMRESULT = 0; pub const WAVERR_BASE: MMRESULT = 32; pub const MIDIERR_BASE: MMRESULT = 64; pub const TIMERR_BASE: MMRESULT = 96; pub const JOYERR_BASE: MMRESULT = 160; pub const MCIERR_BASE: MMRESULT = 256; pub const MIXERR_BASE: MMRESULT = 1024; pub const MMSYSERR_NOERROR: MMRESULT = 0; pub const MMSYSERR_ERROR: MMRESULT = MMSYSERR_BASE + 1; pub const MMSYSERR_BADDEVICEID: MMRESULT = MMSYSERR_BASE + 2; pub const MMSYSERR_NOTENABLED: MMRESULT = MMSYSERR_BASE + 3; pub const MMSYSERR_ALLOCATED: MMRESULT = MMSYSERR_BASE + 4; pub const MMSYSERR_INVALHANDLE: MMRESULT = MMSYSERR_BASE + 5; pub const MMSYSERR_NODRIVER: MMRESULT = MMSYSERR_BASE + 6; pub const MMSYSERR_NOMEM: MMRESULT = MMSYSERR_BASE + 7; pub const MMSYSERR_NOTSUPPORTED: MMRESULT = MMSYSERR_BASE + 8; pub const MMSYSERR_BADERRNUM: MMRESULT = MMSYSERR_BASE + 9; pub const MMSYSERR_INVALFLAG: MMRESULT = MMSYSERR_BASE + 10; pub const MMSYSERR_INVALPARAM: MMRESULT = MMSYSERR_BASE + 11; pub const MMSYSERR_HANDLEBUSY: MMRESULT = MMSYSERR_BASE + 12; pub const MMSYSERR_INVALIDALIAS: MMRESULT = MMSYSERR_BASE + 13; pub const MMSYSERR_BADDB: MMRESULT = MMSYSERR_BASE + 14; pub const MMSYSERR_KEYNOTFOUND: MMRESULT = MMSYSERR_BASE + 15; pub const 
MMSYSERR_READERROR: MMRESULT = MMSYSERR_BASE + 16; pub const MMSYSERR_WRITEERROR: MMRESULT = MMSYSERR_BASE + 17; pub const MMSYSERR_DELETEERROR: MMRESULT = MMSYSERR_BASE + 18; pub const MMSYSERR_VALNOTFOUND: MMRESULT = MMSYSERR_BASE + 19; pub const MMSYSERR_NODRIVERCB: MMRESULT = MMSYSERR_BASE + 20; pub const MMSYSERR_MOREDATA: MMRESULT = MMSYSERR_BASE + 21; pub const MMSYSERR_LASTERROR: MMRESULT = MMSYSERR_BASE + 21; pub const MIDIERR_UNPREPARED: MMRESULT = MIDIERR_BASE + 0; pub const MIDIERR_STILLPLAYING: MMRESULT = MIDIERR_BASE + 1; pub const MIDIERR_NOMAP: MMRESULT = MIDIERR_BASE + 2; pub const MIDIERR_NOTREADY: MMRESULT = MIDIERR_BASE + 3; pub const MIDIERR_NODEVICE: MMRESULT = MIDIERR_BASE + 4; pub const MIDIERR_INVALIDSETUP: MMRESULT = MIDIERR_BASE + 5; pub const MIDIERR_BADOPENMODE: MMRESULT = MIDIERR_BASE + 6; pub const MIDIERR_DONT_CONTINUE: MMRESULT = MIDIERR_BASE + 7; pub const MIDIERR_LASTERROR: MMRESULT = MIDIERR_BASE + 7; pub const CALLBACK_TYPEMASK: DWORD = 0x00070000; pub const CALLBACK_NULL: DWORD = 0x00000000; pub const CALLBACK_WINDOW: DWORD = 0x00010000; pub const CALLBACK_TASK: DWORD = 0x00020000; pub const CALLBACK_FUNCTION: DWORD = 0x00030000; pub const CALLBACK_THREAD: DWORD = CALLBACK_TASK; pub const CALLBACK_EVENT: DWORD = 0x00050000; //497 (Win 7 SDK) pub const WAVERR_BADFORMAT: MMRESULT = WAVERR_BASE + 0; pub const WAVERR_STILLPLAYING: MMRESULT = WAVERR_BASE + 1; pub const WAVERR_UNPREPARED: MMRESULT = WAVERR_BASE + 2; pub const WAVERR_SYNC: MMRESULT = WAVERR_BASE + 3; pub const WAVERR_LASTERROR: MMRESULT = WAVERR_BASE + 3; DECLARE_HANDLE!(HWAVEIN, HWAVEIN__); DECLARE_HANDLE!(HWAVEOUT, HWAVEOUT__); pub type LPHWAVEIN = *mut HWAVEIN; pub type LPHWAVEOUT = *mut HWAVEOUT; pub const WOM_OPEN: UINT = MM_WOM_OPEN; pub const WOM_CLOSE: UINT = MM_WOM_CLOSE; pub const WOM_DONE: UINT = MM_WOM_DONE; pub const WIM_OPEN: UINT = MM_WIM_OPEN; pub const WIM_CLOSE: UINT = MM_WIM_CLOSE; pub const WIM_DATA: UINT = MM_WIM_DATA; pub const WAVE_MAPPER: UINT = 0xFFFFFFFF; pub const WAVE_FORMAT_QUERY: DWORD = 0x0001;
pub const WAVE_MAPPED: DWORD = 0x0004; pub const WAVE_FORMAT_DIRECT: DWORD = 0x0008; pub const WAVE_FORMAT_DIRECT_QUERY: DWORD = WAVE_FORMAT_QUERY | WAVE_FORMAT_DIRECT; pub const WAVE_MAPPED_DEFAULT_COMMUNICATION_DEVICE: DWORD = 0x0010; STRUCT!{struct WAVEHDR { lpData: LPSTR, dwBufferLength: DWORD, dwBytesRecorded: DWORD, dwUser: DWORD_PTR, dwFlags: DWORD, dwLoops: DWORD, lpNext: *mut WAVEHDR, reserved: DWORD_PTR, }} pub type PWAVEHDR = *mut WAVEHDR; pub type NPWAVEHDR = *mut WAVEHDR; pub type LPWAVEHDR = *mut WAVEHDR; STRUCT!{struct WAVEOUTCAPSW { wMid: WORD, wPid: WORD, vDriverVersion: MMVERSION, szPname: [WCHAR; 32], dwFormats: DWORD, wChannels: WORD, wReserved1: WORD, dwSupport: DWORD, }} pub type PWAVEOUTCAPSW = *mut WAVEOUTCAPSW; pub type NPWAVEOUTCAPSW = *mut WAVEOUTCAPSW; pub type LPWAVEOUTCAPSW = *mut WAVEOUTCAPSW; STRUCT!{struct WAVEINCAPSW { wMid: WORD, wPid: WORD, vDriverVersion: MMVERSION, szPname: [WCHAR; 32], dwFormats: DWORD, wChannels: WORD, wReserved1: WORD, }} pub type PWAVEINCAPSW = *mut WAVEINCAPSW; pub type NPWAVEINCAPSW = *mut WAVEINCAPSW; pub type LPWAVEINCAPSW = *mut WAVEINCAPSW; pub const WAVE_INVALIDFORMAT: DWORD = 0x00000000; pub const WAVE_FORMAT_1M08: DWORD = 0x00000001; pub const WAVE_FORMAT_1S08: DWORD = 0x00000002; pub const WAVE_FORMAT_1M16: DWORD = 0x00000004; pub const WAVE_FORMAT_1S16: DWORD = 0x00000008; pub const WAVE_FORMAT_2M08: DWORD = 0x00000010; pub const WAVE_FORMAT_2S08: DWORD = 0x00000020; pub const WAVE_FORMAT_2M16: DWORD = 0x00000040; pub const WAVE_FORMAT_2S16: DWORD = 0x00000080; pub const WAVE_FORMAT_4M08: DWORD = 0x00000100; pub const WAVE_FORMAT_4S08: DWORD = 0x00000200; pub const WAVE_FORMAT_4M16: DWORD = 0x00000400; pub const WAVE_FORMAT_4S16: DWORD = 0x00000800; pub const WAVE_FORMAT_44M08: DWORD = 0x00000100; pub const WAVE_FORMAT_44S08: DWORD = 0x00000200; pub const WAVE_FORMAT_44M16: DWORD = 0x00000400; pub const WAVE_FORMAT_44S16: DWORD = 0x00000800; pub const WAVE_FORMAT_48M08: DWORD = 0x00001000; pub const WAVE_FORMAT_48S08: DWORD = 0x00002000; pub const WAVE_FORMAT_48M16: DWORD = 0x00004000; pub const WAVE_FORMAT_48S16: DWORD = 0x00008000; pub const WAVE_FORMAT_96M08: DWORD = 0x00010000; pub const WAVE_FORMAT_96S08: DWORD = 0x00020000; pub const WAVE_FORMAT_96M16: DWORD = 0x00040000; pub const WAVE_FORMAT_96S16: DWORD = 0x00080000; //782 (Win 7 SDK) pub type PWAVEFORMATEX = *mut WAVEFORMATEX; pub type NPWAVEFORMATEX = *mut WAVEFORMATEX; pub type LPWAVEFORMATEX = *mut WAVEFORMATEX; pub type LPCWAVEFORMATEX = *const WAVEFORMATEX; //2170 (Win 7 SDK) pub const TIMERR_NOERROR: MMRESULT = 0; pub const TIMERR_NOCANDO: MMRESULT = TIMERR_BASE + 1; pub const TIMERR_STRUCT: MMRESULT = TIMERR_BASE + 33; //2198 (Win 7 SDK) STRUCT!{struct TIMECAPS { wPeriodMin: UINT, wPeriodMax: UINT, }} pub type PTIMECAPS = *mut TIMECAPS; pub type NPTIMECAPS = *mut TIMECAPS; pub type LPTIMECAPS = *mut TIMECAPS; STRUCT!{struct MIDIHDR { lpData: LPSTR, dwBufferLength: DWORD, dwBytesRecorded: DWORD, dwUser: DWORD_PTR, dwFlags: DWORD, lpNext: *mut MIDIHDR, reserved: DWORD_PTR, dwOffset: DWORD, dwReserved: [DWORD_PTR; 4], }} pub type PMIDIHDR = *mut MIDIHDR; pub type NPMIDIHDR = *mut MIDIHDR; pub type LPMIDIHDR = *mut MIDIHDR; STRUCT!{struct MIDIINCAPSW { wMid: WORD, wPid: WORD, vDriverVersion: MMVERSION, szPname: [WCHAR; 32], dwSupport: DWORD, }} pub type PMIDIINCAPSW = *mut MIDIINCAPSW; pub type NPMIDIINCAPSW = *mut MIDIINCAPSW; pub type LPMIDIINCAPSW = *mut MIDIINCAPSW; STRUCT!{struct MIDIOUTCAPSW { wMid: WORD, wPid: WORD, vDriverVersion: MMVERSION, szPname: 
[WCHAR; 32], wTechnology: WORD, wVoices: WORD, wNotes: WORD, wChannelMask: WORD, dwSupport: DWORD, }} pub type PMIDIOUTCAPSW = *mut MIDIOUTCAPSW; pub type NPMIDIOUTCAPSW = *mut MIDIOUTCAPSW; pub type LPMIDIOUTCAPSW = *mut MIDIOUTCAPSW; DECLARE_HANDLE!(HMIDIIN, HMIDIIN__); DECLARE_HANDLE!(HMIDIOUT, HMIDIOUT__); pub type LPHMIDIIN = *mut HMIDIIN; pub type LPHMIDIOUT = *mut HMIDIOUT; DECLARE_HANDLE!(HMIDISTRM, HMIDISTRM__); DECLARE_HANDLE!(HMIDI, HMIDI__); pub type LPHMIDISTRM = *mut HMIDISTRM; pub type LPHMIDI = *mut HMIDI;
pub const WAVE_ALLOWSYNC: DWORD = 0x0002;
test_recall.py
import numpy as np import pytest from jina.executors.evaluators.rank.recall import RecallEvaluator @pytest.mark.parametrize( 'eval_at, expected', [ (0, 0.0), (1, 0.2), (2, 0.4), (3, 0.4), (5, 0.4), (100, 0.4) ] ) def test_recall_evaluator(eval_at, expected): matches_ids = [0, 1, 2, 3, 4] desired_ids = [1, 0, 20, 30, 40] evaluator = RecallEvaluator(eval_at=eval_at) assert evaluator.evaluate(actual=matches_ids, desired=desired_ids) == expected assert evaluator._running_stats._n == 1 np.testing.assert_almost_equal(evaluator.mean, expected) @pytest.mark.parametrize( 'eval_at, expected_first', [ (0, 0.0), (1, 0.2), (2, 0.4), (3, 0.4), (5, 0.4), (100, 0.4) ] ) def test_recall_evaluator_average(eval_at, expected_first):
def test_recall_evaluator_no_matches(): matches_ids = [] desired_ids = [1, 0, 20, 30, 40] evaluator = RecallEvaluator(eval_at=2) assert evaluator.evaluate(actual=matches_ids, desired=desired_ids) == 0.0 assert evaluator._running_stats._n == 1 np.testing.assert_almost_equal(evaluator.mean, 0.0)
matches_ids = [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]] desired_ids = [[1, 0, 20, 30, 40], [-1, -1, -1, -1, -1], [-1, -1, -1, -1, -1]] evaluator = RecallEvaluator(eval_at=eval_at) assert evaluator.evaluate(actual=matches_ids[0], desired=desired_ids[0]) == expected_first assert evaluator.evaluate(actual=matches_ids[1], desired=desired_ids[1]) == 0.0 assert evaluator.evaluate(actual=matches_ids[2], desired=desired_ids[2]) == 0.0 assert evaluator._running_stats._n == 3 np.testing.assert_almost_equal(evaluator.mean, expected_first / 3)
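A worked version of the expected values in the parametrizations above: with eval_at=2 only the first two matches are considered, and recall is taken over all five desired ids. This is a sketch of the metric, not the RecallEvaluator implementation:

def recall_at_k(actual, desired, k):
    retrieved = set(actual[:k])
    return len(retrieved & set(desired)) / len(desired)

assert recall_at_k([0, 1, 2, 3, 4], [1, 0, 20, 30, 40], 1) == 0.2
assert recall_at_k([0, 1, 2, 3, 4], [1, 0, 20, 30, 40], 2) == 0.4
assert recall_at_k([0, 1, 2, 3, 4], [1, 0, 20, 30, 40], 100) == 0.4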
ChatController.js
import { ListAndChatView } from "../views/ListAndChatView.js"; import { ChatView } from "../views/ChatView.js"; import { HttpService } from "../services/HttpService.js"; export class ChatController { constructor(rootElementId, user) { this.user = user; this.messageService = new HttpService('/message'); this.fileService = new HttpService('/file'); this.messages = this.messageService.get() .then(res => res.json()) .then(res => { this.messages = res; console.log(this.messages); // Add all messages }) .catch(error => console.log(error)); this.socket = io.connect('http://chat.bles:7777/chat'); // this.socket = socket; this.socket.on('receiver message', body => { // Add message remote this.addMessageView(body); }); this.view = new ListAndChatView(document.getElementById(rootElementId), 4, 6); this.view.update(); this.listCol = document.getElementById('listCol'); this.chatCol = document.getElementById('chatCol'); this.chatView = new ChatView(this.chatCol); this.chatView.update(); this.eListMessages = document.getElementById('listMessages'); this.eMessage = document.getElementById('message'); this.eSend = document.getElementById('send'); this.checkMessageInput = () => { return this.eMessage.value.length > 0; } this.eSend.addEventListener('click', () => { if (this.checkMessageInput()) { let body = { text: this.eMessage.value,
// Add message local this.addMessageView(body); this.eMessage.value = ""; this.messageService.post(JSON.stringify(body)) .then(res => res.json()) .then(res => console.log(res)) .catch(error => console.log(error)); } }); } addMessageView(message) { let rootDiv = document.createElement('div'); rootDiv.setAttribute('class', this.user._id != message.user._id ? 'd-flex flex-row mb-2 col-12' : 'd-flex flex-row-reverse mb-2 col-12'); let img = document.createElement('img'); img.setAttribute('class', 'profileImage'); img.setAttribute('alt', 'Profile Image'); let profile = message.user.profilePicture; if (profile == 'default' || profile.filename == 'default') { img.setAttribute('src', './img/default.png'); } else { img.setAttribute('src', profile); } let card = document.createElement('div'); card.setAttribute('class', 'card shadow-sm'); let cardBody = document.createElement('div'); cardBody.setAttribute('class', 'card-body'); let title = document.createElement('h4'); title.setAttribute('class', 'card-title'); title.textContent = message.user.name; let textMessage = document.createElement('p'); textMessage.setAttribute('class', 'card-text'); textMessage.textContent = message.text; cardBody.appendChild(title); cardBody.appendChild(textMessage); card.appendChild(cardBody); rootDiv.appendChild(img); rootDiv.appendChild(card); this.eListMessages.appendChild(rootDiv); } }
user: this.user } this.socket.emit('send message', body);
TestAccidental.py
import unittest from MusicTheory.pitch.Accidental import Accidental import Framework.ConstMeta """ Tests for Accidental. """ class TestAccidental(unittest.TestCase): def test_Accidentals(self): self.assertEqual(Accidental.Accidentals, {'♯': 1, '#': 1, '+': 1, '♭': -1, 'b': -1, '-': -1}) def test_Accidentals_NotSet(self): with self.asse
for count in range(1, 4): for name, interval in Accidental.Accidentals.items(): if not name: continue with self.subTest(accidenta=name, count=count): self.assertEqual(Accidental.Get(name * count), interval * count) def test_Get_None(self): self.assertEqual(Accidental.Get(None), 0) def test_Get_Blank(self): self.assertEqual(Accidental.Get(''), 0) def test_Get_int(self): with self.assertRaises(TypeError) as e: Accidental.Get(100) self.assertIn('引数accidentalは文字列型にしてください。', str(e.exception)) def test_Get_NotSameChars(self): with self.assertRaises(ValueError) as e: Accidental.Get('無効な文字') self.assertIn('引数accidentalは同じ文字のみ連続使用を許されます。異なる文字を混在させることはできません。', str(e.exception)) def test_Get_Invalid(self): with self.assertRaises(ValueError) as e: Accidental.Get('無無無') self.assertIn('引数accidentalに使える文字は次のものだけです。', str(e.exception)) def test_Get_Valid_NotSameChars(self): with self.assertRaises(ValueError) as e: Accidental.Get('+-') self.assertIn('引数accidentalは同じ文字のみ連続使用を許されます。異なる文字を混在させることはできません。', str(e.exception)) if __name__ == '__main__': unittest.main()
rtRaises(Framework.ConstMeta.ConstMeta.ConstError) as e: Accidental.Accidentals = 'some value.' self.assertEqual('readonly。再代入禁止です。', str(e.exception)) def test_Get(self):
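From the assertions above, Accidental.Get maps a run of one accidental character to its interval times the run length (e.g. '##' is +2, 'bb' is -2) and rejects mixed runs. A minimal sketch of that rule, not the MusicTheory implementation:

ACCIDENTALS = {'♯': 1, '#': 1, '+': 1, '♭': -1, 'b': -1, '-': -1}

def accidental_value(text):
    if not text:          # None or '' count as no accidental
        return 0
    if len(set(text)) != 1:
        raise ValueError('only a run of a single accidental character is allowed')
    return ACCIDENTALS[text[0]] * len(text)

assert accidental_value('##') == 2
assert accidental_value('♭♭♭') == -3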
utils.py
""" Martin Kersner, [email protected] seoulai.com 2018 Adapted by Gabriela B. to work with python 2.7 and ROS """ import random import numpy as np from base import Constants from rules import Rules class BoardEncoding(object): def __init__(self): self._constants = Constants() self._encoding = {} self.empty = 0 self.dark = 20 self.dark_king = 21 self.light = 10 self.light_king = 11 def __getitem__(self, name): return self._encoding[name] @property def empty(self): return self._encoding[self._constants.EMPTY] @empty.setter def empty(self, value): self._encoding[self._constants.EMPTY] = value @property def dark(self): return self._encoding[self._constants.DARK] @dark.setter def dark(self, value): self._encoding[self._constants.DARK] = value @property def dark_king(self): return self._encoding[self._constants.DARK_KING] @dark_king.setter def dark_king(self, value): self._encoding[self._constants.DARK_KING] = value @property def light(self): return self._encoding[self._constants.LIGHT] @light.setter def light(self, value): self._encoding[self._constants.LIGHT] = value @property def light_king(self): return self._encoding[self._constants.LIGHT_KING] @light_king.setter def light_king(self, value): self._encoding[self._constants.LIGHT_KING] = value def board_list2numpy( board_list, encoding) : """Convert the state of game (`board_list`) into 2D NumPy Array using `encoding`. Args: board_list: (List[List[Piece]]) State of the game. encoding: (BoardEncoding) Optional argument. If not given default encoding will be utilized. Returns: board_numpy: (np.array) """ board_size = len(board_list) constants = Constants() board_numpy = encoding[constants.EMPTY] * np.ones((board_size, board_size)) for row in range(board_size): for col in range(board_size): if board_list[row][col] is not None: ptype = board_list[row][col].ptype king = board_list[row][col].king if ptype == constants.LIGHT: if king: piece_type = constants.LIGHT_KING else: piece_type = constants.LIGHT else: # DARK if king: piece_type = constants.DARK_KING else: piece_type = constants.DARK board_numpy[row][col] = encoding[piece_type] return board_numpy def
( board, ptype, board_size): """Generate a random move from all valid `ptype` moves, but do not execute it. Args: board: (List[List[Piece]]) State of the game. ptype: (int) type of piece for which random move will be generated board_size: (int) size of board """ valid_moves = Rules.generate_valid_moves(board, ptype, board_size) rand_from_row, rand_from_col = random.choice(list(valid_moves.keys())) rand_to_row, rand_to_col = random.choice(valid_moves[(rand_from_row, rand_from_col)]) return rand_from_row, rand_from_col, rand_to_row, rand_to_col #new functions def print_board(board_list): """ print board for debugging purposes receives board as a board_list: List[List], """ numpy_board=board_list2numpy(board_list) print(numpy_board)
generate_random_move
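With the default codes set in BoardEncoding.__init__ above, board_list2numpy returns a float array using 0/10/11/20/21 for empty, light, light-king, dark and dark-king squares. A small stand-alone illustration of that layout (the real Piece and Constants classes live in base.py and are not reproduced here):

import numpy as np

EMPTY, LIGHT, LIGHT_KING, DARK, DARK_KING = 0, 10, 11, 20, 21
board = EMPTY * np.ones((8, 8))
board[0, 1] = DARK        # ordinary dark piece on row 0, col 1
board[7, 2] = LIGHT_KING  # crowned light piece on row 7, col 2
print(board)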
3 - notacaoLiteral.js
const a = 1 const b = 2 const c = 3 const obj1 = {a : a, b : b, c : c} const obj2 = {a, b, c} console.log(obj1, obj2) const nomeAttr = 'nota' const valorAttr = 7.87 const obj3 = {} obj3[nomeAttr] = valorAttr console.log(obj3) const obj4 = {[nomeAttr] : valorAttr} console.log(obj4)
const obj5 = { funcao1 : function (){ // ... }, funcao2(){ // .... } } console.log(obj5)
parameters.rs
use std::rc::Rc; use super::{Context, Id}; use ffi::*; use media; pub struct Parameters { ptr: *mut AVCodecParameters, owner: Option<Rc<dyn Drop>>, } unsafe impl Send for Parameters {} impl Parameters { pub unsafe fn wrap(ptr: *mut AVCodecParameters, owner: Option<Rc<dyn Drop>>) -> Self { Parameters { ptr, owner } } pub unsafe fn as_ptr(&self) -> *const AVCodecParameters { self.ptr as *const _ } pub unsafe fn as_mut_ptr(&mut self) -> *mut AVCodecParameters { self.ptr } } impl Parameters { pub fn new() -> Self { unsafe { Parameters { ptr: avcodec_parameters_alloc(), owner: None, } } } pub fn medium(&self) -> media::Type { unsafe { media::Type::from((*self.as_ptr()).codec_type) } } pub fn id(&self) -> Id { unsafe { Id::from((*self.as_ptr()).codec_id) } } } impl Default for Parameters { fn default() -> Self { Self::new() } } impl Drop for Parameters { fn drop(&mut self) { unsafe { if self.owner.is_none() { avcodec_parameters_free(&mut self.as_mut_ptr()); } } } } impl Clone for Parameters { fn clone(&self) -> Self { let mut ctx = Parameters::new(); ctx.clone_from(self); ctx } fn clone_from(&mut self, source: &Self) { unsafe { avcodec_parameters_copy(self.as_mut_ptr(), source.as_ptr());
impl<C: AsRef<Context>> From<C> for Parameters { fn from(context: C) -> Parameters { let mut parameters = Parameters::new(); let context = context.as_ref(); unsafe { avcodec_parameters_from_context(parameters.as_mut_ptr(), context.as_ptr()); } parameters } }
} } }
get_exit_nodes_test.py
# Copyright (c) 2016-present, Facebook, Inc. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from typing import Callable from ..get_exit_nodes import ExitNodeGenerator from ..model_generator import Configuration from .test_functions import __name__ as qualifier, all_functions class GetExitNodesTest(unittest.TestCase): def test_compute_models(self):
sink = "TaintSink[ReturnedToUser]" self.assertEqual( list(ExitNodeGenerator().compute_models(all_functions)), [ f"def {qualifier}.TestClass.methodA(self, x) -> {sink}: ...", f"def {qualifier}.TestClass.methodB(self, *args) -> {sink}: ...", f"def {qualifier}.testA() -> {sink}: ...", f"def {qualifier}.testB(x) -> {sink}: ...", f"def {qualifier}.testC(x) -> {sink}: ...", f"def {qualifier}.testD(x, *args) -> {sink}: ...", f"def {qualifier}.testE(x, **kwargs) -> {sink}: ...", ], ) Configuration.whitelisted_views = [f"{qualifier}.TestClass.methodA"] self.assertEqual( list(ExitNodeGenerator().compute_models(all_functions)), [ f"def {qualifier}.TestClass.methodB(self, *args) -> {sink}: ...", f"def {qualifier}.testA() -> {sink}: ...", f"def {qualifier}.testB(x) -> {sink}: ...", f"def {qualifier}.testC(x) -> {sink}: ...", f"def {qualifier}.testD(x, *args) -> {sink}: ...", f"def {qualifier}.testE(x, **kwargs) -> {sink}: ...", ], )
editCdrCshDrawerController.js
angular.module('AdCshdwr').controller('EditCdrCshDrawerController', function($scope, $routeParams, $location, CdrCshDrawerResource ) { var self = this; $scope.disabled = false; $scope.$location = $location; $scope.get = function() { var successCallback = function(data){ self.original = data; $scope.cdrCshDrawer = new CdrCshDrawerResource(self.original); }; var errorCallback = function() { $location.path("/CdrCshDrawers"); }; CdrCshDrawerResource.get({CdrCshDrawerId:$routeParams.CdrCshDrawerId}, successCallback, errorCallback); };
$scope.isClean = function() { return angular.equals(self.original, $scope.cdrCshDrawer); }; $scope.save = function() { var successCallback = function(){ $scope.get(); $scope.displayError = false; }; var errorCallback = function() { $scope.displayError=true; }; $scope.cdrCshDrawer.$update(successCallback, errorCallback); }; $scope.cancel = function() { $location.path("/CdrCshDrawers"); }; $scope.remove = function() { var successCallback = function() { $location.path("/CdrCshDrawers"); $scope.displayError = false; }; var errorCallback = function() { $scope.displayError=true; }; $scope.cdrCshDrawer.$remove(successCallback, errorCallback); }; $scope.get(); });
local_cluster_manager.go
// Copyright 2021 IBM Corp. // SPDX-License-Identifier: Apache-2.0 package local
import ( "context" "fmt" "emperror.dev/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "fybrik.io/fybrik/manager/apis/app/v1alpha1" "fybrik.io/fybrik/pkg/multicluster" ) const ( clusterMetadataConfigmapName string = "cluster-metadata" ) // localClusterManager for local cluster configuration type localClusterManager struct { Client client.Client Namespace string } // GetClusters returns a list of registered clusters func (cm *localClusterManager) GetClusters() ([]multicluster.Cluster, error) { cmcm := corev1.ConfigMap{} namespacedName := client.ObjectKey{ Name: clusterMetadataConfigmapName, Namespace: cm.Namespace, } if err := cm.Client.Get(context.Background(), namespacedName, &cmcm); err != nil { return nil, errors.Wrap(err, "error in GetClusters") } cluster := multicluster.CreateCluster(cmcm) clusters := []multicluster.Cluster{cluster} return clusters, nil } // GetLocalClusterName returns the local cluster name func (cm *localClusterManager) GetLocalClusterName() (string, error) { clusters, err := cm.GetClusters() if err != nil { return "", err } if len(clusters) != 1 { return "", errors.New(v1alpha1.InvalidClusterConfiguration) } return clusters[0].Name, nil } // GetBlueprint returns a blueprint matching the given name, namespace and cluster details func (cm *localClusterManager) GetBlueprint(cluster, namespace, name string) (*v1alpha1.Blueprint, error) { if localCluster, err := cm.GetLocalClusterName(); err != nil || localCluster != cluster { return nil, fmt.Errorf("unregistered cluster: %s", cluster) } blueprint := &v1alpha1.Blueprint{} namespacedName := client.ObjectKey{ Name: name, Namespace: namespace, } err := cm.Client.Get(context.Background(), namespacedName, blueprint) return blueprint, err } // CreateBlueprint creates a blueprint resource or updates an existing one func (cm *localClusterManager) CreateBlueprint(cluster string, blueprint *v1alpha1.Blueprint) error { return cm.UpdateBlueprint(cluster, blueprint) } // UpdateBlueprint updates the given blueprint or creates a new one if it does not exist func (cm *localClusterManager) UpdateBlueprint(cluster string, blueprint *v1alpha1.Blueprint) error { if localCluster, err := cm.GetLocalClusterName(); err != nil || localCluster != cluster { return fmt.Errorf("unregistered cluster: %s", cluster) //nolint:revive // Ignore repetitive error msg } resource := &v1alpha1.Blueprint{ ObjectMeta: metav1.ObjectMeta{ Name: blueprint.Name, Namespace: blueprint.Namespace, }, } if _, err := ctrl.CreateOrUpdate(context.Background(), cm.Client, resource, func() error { resource.Spec = blueprint.Spec resource.ObjectMeta.Labels = blueprint.ObjectMeta.Labels resource.ObjectMeta.Annotations = blueprint.ObjectMeta.Annotations return nil }); err != nil { return err } return nil } // DeleteBlueprint deletes the blueprint resource func (cm *localClusterManager) DeleteBlueprint(cluster, namespace, name string) error { blueprint, err := cm.GetBlueprint(cluster, namespace, name) if err != nil { return err } return cm.Client.Delete(context.Background(), blueprint) } // NewClusterManager creates an instance of ClusterManager for a local cluster configuration func NewClusterManager(cl client.Client, namespace string) (multicluster.ClusterManager, error) { return &localClusterManager{ Client: cl, Namespace: namespace, }, nil }
submit_tx.rs
//! Txs App submit_tx module #![forbid(unsafe_code)] use crate::{ config::AppCfg, entrypoint::{self, EntryPointTxsCmd}, prelude::app_config, save_tx::save_tx, sign_tx::sign_tx, }; use abscissa_core::{status_ok, status_warn}; use anyhow::Error; use cli::{libra_client::LibraClient, AccountData, AccountStatus}; use ol_keys::{wallet, scheme::KeyScheme}; use libra_crypto::{ ed25519::{Ed25519PrivateKey, Ed25519PublicKey}, test_utils::KeyPair, }; use libra_global_constants::OPERATOR_KEY; use libra_json_rpc_types::views::{TransactionView, VMStatusView}; use libra_secure_storage::{CryptoStorage, NamespacedStorage, OnDiskStorageInternal, Storage}; use libra_types::{account_address::AccountAddress, waypoint::Waypoint}; use libra_types::{ chain_id::ChainId, transaction::{authenticator::AuthenticationKey, Script, SignedTransaction}, }; use libra_wallet::WalletLibrary; use ol_types::{ self, config::{TxCost, TxType}, }; use reqwest::Url; use std::{ io::{stdout, Write}, path::PathBuf, thread, time, }; /// All the parameters needed for a client transaction. #[derive(Debug)] pub struct TxParams { /// User's 0L authkey used in mining. pub auth_key: AuthenticationKey, /// Address of the signer of transaction, e.g. owner's operator pub signer_address: AccountAddress, /// Optional field for Miner, for operator to send owner // TODO: refactor so that this is not par of the TxParams type pub owner_address: AccountAddress, /// Url pub url: Url, /// waypoint pub waypoint: Waypoint, /// KeyPair pub keypair: KeyPair<Ed25519PrivateKey, Ed25519PublicKey>, /// tx cost and timeout info pub tx_cost: TxCost, // /// User's Maximum gas_units willing to run. Different than coin. // pub max_gas_unit_for_tx: u64, // /// User's GAS Coin price to submit transaction. // pub coin_price_per_unit: u64, // /// User's transaction timeout. // pub user_tx_timeout: u64, // for compatibility with UTC's timestamp. /// Chain id pub chain_id: ChainId, } // pub struct TxParams { // /// Sender's 0L authkey, may be the operator. // pub sender_auth_key: AuthenticationKey, // /// User's operator sender account if different than the owner account, used to send transactions // pub sender_address: AccountAddress, // /// User's 0L owner address, where the mining proofs go to. // pub owner_address: AccountAddress, // /// Url // pub url: Url, // /// waypoint // pub waypoint: Waypoint, // /// KeyPair // pub keypair: KeyPair<Ed25519PrivateKey, Ed25519PublicKey>, // /// User's Maximum gas_units willing to run. Different than coin. // pub max_gas_unit_for_tx: u64, // /// User's GAS Coin price to submit transaction. // pub coin_price_per_unit: u64, // /// User's transaction timeout. // pub user_tx_timeout: u64, // for compatibility with UTC's timestamp. // } /// wrapper which checks entry point arguments before submitting tx, possibly saving the tx script pub fn maybe_submit( script: Script, tx_params: &TxParams, no_send: bool, save_path: Option<PathBuf>, ) -> Result<SignedTransaction, Error> { let mut client = LibraClient::new(tx_params.url.clone(), tx_params.waypoint).unwrap(); let (mut account_data, txn) = stage(script, tx_params, &mut client); if let Some(path) = save_path { // TODO: This will not work with batch operations like autopay_batch, last one will overwrite the file. 
save_tx(txn.clone(), path); } if no_send { return Ok(txn); } match submit_tx(client, txn.clone(), &mut account_data) { Ok(res) => match eval_tx_status(res) { Ok(_) => Ok(txn), Err(e) => Err(e), }, Err(e) => Err(e), } } /// convenience for wrapping multiple transactions pub fn batch_wrapper( batch: Vec<Script>, tx_params: &TxParams, no_send: bool, save_path: Option<PathBuf>, ) { batch.into_iter().enumerate().for_each(|(i, s)| { // TODO: format path for batch scripts let new_path = if save_path.is_some() { Some(save_path.clone().unwrap().join(i.to_string())) } else { None }; maybe_submit(s, tx_params, no_send, new_path).unwrap(); // TODO: handle saving of batches to file. }); } fn stage( script: Script, tx_params: &TxParams, client: &mut LibraClient, ) -> (AccountData, SignedTransaction) { // let mut client = LibraClient::new(tx_params.url.clone(), tx_params.waypoint).unwrap(); let chain_id = ChainId::new(client.get_metadata().unwrap().chain_id); let (account_state, _) = client .get_account(tx_params.signer_address.clone(), true) .unwrap(); let sequence_number = match account_state { Some(av) => av.sequence_number, None => 0, }; // Sign the transaction script let txn = sign_tx(&script, tx_params, sequence_number, chain_id).unwrap(); // Get account_data struct let signer_account_data = AccountData { address: tx_params.signer_address, authentication_key: Some(tx_params.auth_key.to_vec()), key_pair: Some(tx_params.keypair.clone()), sequence_number, status: AccountStatus::Persisted, }; (signer_account_data, txn) } /// Submit a transaction to the network. pub fn submit_tx( mut client: LibraClient, txn: SignedTransaction, mut signer_account_data: &mut AccountData, ) -> Result<TransactionView, Error> { // let mut client = LibraClient::new(tx_params.url.clone(), tx_params.waypoint).unwrap(); // Submit the transaction with libra_client match client.submit_transaction(Some(&mut signer_account_data), txn.clone()) { Ok(_) => match wait_for_tx(txn.sender(), txn.sequence_number(), &mut client) { Some(res) => Ok(res), None => Err(Error::msg("No Transaction View returned")), }, Err(err) => Err(err), } } /// Main get tx params logic based on the design in this URL: /// https://github.com/OLSF/libra/blob/tx-sender/txs/README.md#txs-logic--usage pub fn tx_params_wrapper(tx_type: TxType) -> Result<TxParams, Error> { let EntryPointTxsCmd { url, waypoint, swarm_path, swarm_persona, is_operator, use_upstream_url, .. } = entrypoint::get_args(); let app_config = app_config().clone(); tx_params( app_config, url, waypoint, swarm_path, swarm_persona, tx_type, is_operator, use_upstream_url, ) } /// tx_parameters format pub fn tx_params( config: AppCfg, url_opt: Option<Url>, waypoint: Option<Waypoint>, swarm_path: Option<PathBuf>, swarm_persona: Option<String>, tx_type: TxType, is_operator: bool, use_upstream_url: bool, ) -> Result<TxParams, Error> { let url = if url_opt.is_some() { url_opt.unwrap() } else { config.what_url(use_upstream_url) }; let mut tx_params: TxParams = if swarm_path.is_some() { get_tx_params_from_swarm( swarm_path.clone().expect("needs a valid swarm temp dir"), swarm_persona.expect("need a swarm 'persona' with credentials in fixtures."), is_operator, ) .unwrap() } else { if is_operator { get_oper_params( &config, tx_type, url, waypoint) } else { // Get from 0L.toml e.g. 
~/.0L/0L.toml, or use Profile::default() get_tx_params_from_toml(config.clone(), tx_type, None, url, waypoint, swarm_path.as_ref().is_some()).unwrap() } }; if waypoint.is_some() { tx_params.waypoint = waypoint.unwrap(); } Ok(tx_params) } /// Extract params from a local running swarm pub fn get_tx_params_from_swarm( swarm_path: PathBuf, swarm_persona: String, is_operator: bool, ) -> Result<TxParams, Error> { let (url, waypoint) = ol_types::config::get_swarm_rpc_url(swarm_path); let mnem = ol_fixtures::get_persona_mnem(&swarm_persona.as_str()); let keys = KeyScheme::new_from_mnemonic(mnem); let keypair = if is_operator { KeyPair::from(keys.child_1_operator.get_private_key()) } else { KeyPair::from(keys.child_0_owner.get_private_key()) }; let pubkey = keys.child_0_owner.get_public(); let auth_key = AuthenticationKey::ed25519(&pubkey); let address = auth_key.derived_address(); let tx_params = TxParams { auth_key, signer_address: address, owner_address: address, url, waypoint, keypair, tx_cost: TxCost { max_gas_unit_for_tx: 1_000_000, coin_price_per_unit: 1, // in micro_gas user_tx_timeout: 5_000, }, chain_id: ChainId::new(4), }; println!("Info: Got tx params from swarm"); Ok(tx_params) } /// Form tx parameters struct pub fn get_oper_params( config: &AppCfg, tx_type: TxType, url: Url, wp: Option<Waypoint>, // // url_opt overrides all node configs, takes precedence over use_backup_url // url_opt: Option<Url>, // upstream_url: bool, ) -> TxParams { let orig_storage = Storage::OnDiskStorage(OnDiskStorageInternal::new( config.workspace.node_home.join("key_store.json").to_owned(), )); let storage = Storage::NamespacedStorage(NamespacedStorage::new( orig_storage, format!("{}-oper", &config.profile.auth_key), )); // export_private_key_for_version let privkey = storage .export_private_key(OPERATOR_KEY) .expect("could not parse operator key in key_store.json"); let keypair = KeyPair::from(privkey); let pubkey = &keypair.public_key; // keys.child_0_owner.get_public(); let auth_key = AuthenticationKey::ed25519(pubkey); let waypoint = wp.unwrap_or_else(|| { config.get_waypoint(None).unwrap() }); let tx_cost = config.tx_configs.get_cost(tx_type); TxParams { auth_key, signer_address: auth_key.derived_address(), owner_address: config.profile.account, // address of sender url, waypoint, keypair, tx_cost, chain_id: ChainId::new(1), } } /// Gets transaction params from the 0L project root. pub fn
( config: AppCfg, tx_type: TxType, wallet_opt: Option<&WalletLibrary>, url: Url, wp: Option<Waypoint>, is_swarm: bool, ) -> Result<TxParams, Error> { // let url = config.profile.default_node.clone().unwrap(); let (auth_key, address, wallet) = if let Some(wallet) = wallet_opt { wallet::get_account_from_wallet(wallet) } else { wallet::get_account_from_prompt() }; let waypoint = wp.unwrap_or_else(|| { config.get_waypoint(None).unwrap() }); let keys = KeyScheme::new_from_mnemonic(wallet.mnemonic()); let keypair = KeyPair::from(keys.child_0_owner.get_private_key()); let tx_cost = config.tx_configs.get_cost(tx_type); let chain_id = if is_swarm { ChainId::new(4) } else { // main net id ChainId::new(1) }; let tx_params = TxParams { auth_key, signer_address: address, owner_address: address, url, waypoint, keypair, tx_cost: tx_cost.to_owned(), // max_gas_unit_for_tx: config.tx_configs.management_txs.max_gas_unit_for_tx, // coin_price_per_unit: config.tx_configs.management_txs.coin_price_per_unit, // in micro_gas // user_tx_timeout: config.tx_configs.management_txs.user_tx_timeout, chain_id, }; Ok(tx_params) } /// Wait for the response from the libra RPC. pub fn wait_for_tx( signer_address: AccountAddress, sequence_number: u64, client: &mut LibraClient, ) -> Option<TransactionView> { println!( "\nAwaiting tx status \n\ Submitted from account: {} with sequence number: {}", signer_address, sequence_number ); loop { thread::sleep(time::Duration::from_millis(1000)); // prevent all the logging the client does while // it loops through the query. stdout().flush().unwrap(); match &mut client.get_txn_by_acc_seq(signer_address, sequence_number, false) { Ok(Some(txn_view)) => { return Some(txn_view.to_owned()); } Err(e) => { println!("Response with error: {:?}", e); } _ => { print!("."); } } } } /// Evaluate the response of a submitted txs transaction. pub fn eval_tx_status(result: TransactionView) -> Result<(), Error> { match result.vm_status == VMStatusView::Executed { true => { status_ok!("\nSuccess:", "transaction executed"); Ok(()) } false => { status_warn!("Transaction failed"); let msg = format!("Rejected with code:{:?}", result.vm_status); Err(Error::msg(msg)) } } } impl TxParams { /// creates params for unit tests pub fn test_fixtures() -> TxParams { // This mnemonic is hard coded into the swarm configs. see configs/config_builder // let mnem_path = format!("./fixtures/mnemonic/{}.mnem", persona); let mnemonic = "talent sunset lizard pill fame nuclear spy noodle basket okay critic grow sleep legend hurry pitch blanket clerk impose rough degree sock insane purse".to_string(); let keys = KeyScheme::new_from_mnemonic(mnemonic); let keypair = KeyPair::from(keys.child_0_owner.get_private_key()); let pubkey = keys.child_0_owner.get_public(); let signer_auth_key = AuthenticationKey::ed25519(&pubkey); let signer_address = signer_auth_key.derived_address(); let url = Url::parse("http://localhost:8080").unwrap(); let waypoint: Waypoint = "0:732ea2e1c3c5ee892da11abcd1211f22c06b5cf75fd6d47a9492c21dbfc32a46" .parse() .unwrap(); TxParams { auth_key: signer_auth_key, signer_address, owner_address: signer_address, url, waypoint, keypair, tx_cost: TxCost::new(5_000), // max_gas_unit_for_tx: 5_000, // coin_price_per_unit: 1, // in micro_gas // user_tx_timeout: 5_000, chain_id: ChainId::new(4), // swarm/testnet } } }
get_tx_params_from_toml
design.ts
import { v4 as uuid } from 'uuid'; const designCategory = { courses: [ { slug: 'drawing-course-for-beginners', title: 'Drawing course for beginners', }, { slug: 'learn-3d-animations', title: 'Learn 3D animations', }, { slug: 'advanced-photoshop-training', title: 'Advanced photoshop training', }, { slug: 'ux-for-beginners', title: 'User experience for beginners', }, { slug: 'drawing-comics', title: 'Drawing comics', }, { slug: 'animations-masterclass', title: 'Animations masterclass', }, { slug: 'adobe-illustrator-for-beginners', title: 'Adobe Illustrator for beginners', }, { slug: 'web-design-basics', title: 'Web design basics', }, { slug: 'sketch-fundamentals', title: 'Sketch fundamentals', }, { slug: 'mobile-app-design', title: 'Mobile App design', }, ], id: uuid(), slug: 'design', title: 'Design', };
export default designCategory;
Admin.js
import { Button, Typography } from "@mui/material"; import { Link } from "react-router-dom"; import React from "react"; export default function
() { return ( <div> <Typography variant="h3">我是Admin 页面</Typography> <Button variant="contained" component={Link} to="/admin/foundingMember/create" > 添加初始团队 </Button> <Button variant="contained" component={Link} to="/admin/uwcssaMember/create" > 添加学生会成员 </Button> </div> ); }
Admin
utils.rs
// Copyright 2018 Developers of the Rand project. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Math helper functions #[cfg(feature="simd_support")] use packed_simd::*; #[cfg(feature="std")] use crate::distributions::ziggurat_tables; #[cfg(feature="std")] use crate::Rng; pub trait WideningMultiply<RHS = Self> { type Output; fn wmul(self, x: RHS) -> Self::Output; } macro_rules! wmul_impl { ($ty:ty, $wide:ty, $shift:expr) => { impl WideningMultiply for $ty { type Output = ($ty, $ty); #[inline(always)] fn wmul(self, x: $ty) -> Self::Output { let tmp = (self as $wide) * (x as $wide); ((tmp >> $shift) as $ty, tmp as $ty) } } }; // simd bulk implementation ($(($ty:ident, $wide:ident),)+, $shift:expr) => { $( impl WideningMultiply for $ty { type Output = ($ty, $ty); #[inline(always)] fn wmul(self, x: $ty) -> Self::Output { // For supported vectors, this should compile to a couple // supported multiply & swizzle instructions (no actual // casting). // TODO: optimize let y: $wide = self.cast(); let x: $wide = x.cast(); let tmp = y * x; let hi: $ty = (tmp >> $shift).cast(); let lo: $ty = tmp.cast(); (hi, lo) } } )+ }; } wmul_impl! { u8, u16, 8 } wmul_impl! { u16, u32, 16 } wmul_impl! { u32, u64, 32 } #[cfg(not(target_os = "emscripten"))] wmul_impl! { u64, u128, 64 } // This code is a translation of the __mulddi3 function in LLVM's // compiler-rt. It is an optimised variant of the common method // `(a + b) * (c + d) = ac + ad + bc + bd`. // // For some reason LLVM can optimise the C version very well, but // keeps shuffling registers in this Rust translation. macro_rules! wmul_impl_large { ($ty:ty, $half:expr) => { impl WideningMultiply for $ty { type Output = ($ty, $ty); #[inline(always)] fn wmul(self, b: $ty) -> Self::Output { const LOWER_MASK: $ty = !0 >> $half; let mut low = (self & LOWER_MASK).wrapping_mul(b & LOWER_MASK); let mut t = low >> $half; low &= LOWER_MASK; t += (self >> $half).wrapping_mul(b & LOWER_MASK); low += (t & LOWER_MASK) << $half; let mut high = t >> $half; t = low >> $half; low &= LOWER_MASK; t += (b >> $half).wrapping_mul(self & LOWER_MASK); low += (t & LOWER_MASK) << $half; high += t >> $half; high += (self >> $half).wrapping_mul(b >> $half); (high, low) } } }; // simd bulk implementation (($($ty:ty,)+) $scalar:ty, $half:expr) => { $( impl WideningMultiply for $ty { type Output = ($ty, $ty); #[inline(always)] fn wmul(self, b: $ty) -> Self::Output { // needs wrapping multiplication const LOWER_MASK: $scalar = !0 >> $half; let mut low = (self & LOWER_MASK) * (b & LOWER_MASK); let mut t = low >> $half; low &= LOWER_MASK; t += (self >> $half) * (b & LOWER_MASK); low += (t & LOWER_MASK) << $half; let mut high = t >> $half; t = low >> $half; low &= LOWER_MASK; t += (b >> $half) * (self & LOWER_MASK); low += (t & LOWER_MASK) << $half; high += t >> $half; high += (self >> $half) * (b >> $half); (high, low) } } )+ }; } #[cfg(target_os = "emscripten")] wmul_impl_large! { u64, 32 } #[cfg(not(target_os = "emscripten"))] wmul_impl_large! { u128, 64 } macro_rules! 
wmul_impl_usize { ($ty:ty) => { impl WideningMultiply for usize { type Output = (usize, usize); #[inline(always)] fn wmul(self, x: usize) -> Self::Output { let (high, low) = (self as $ty).wmul(x as $ty); (high as usize, low as usize) } } } } #[cfg(target_pointer_width = "32")] wmul_impl_usize! { u32 } #[cfg(target_pointer_width = "64")] wmul_impl_usize! { u64 } #[cfg(all(feature = "simd_support", feature = "nightly"))] mod simd_wmul { #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; use super::*; wmul_impl! { (u8x2, u16x2), (u8x4, u16x4), (u8x8, u16x8), (u8x16, u16x16), (u8x32, u16x32),, 8 } wmul_impl! { (u16x2, u32x2),, 16 } #[cfg(not(target_feature = "sse2"))] wmul_impl! { (u16x4, u32x4),, 16 }
#[cfg(not(target_feature = "avx2"))] wmul_impl! { (u16x16, u32x16),, 16 } // 16-bit lane widths allow use of the x86 `mulhi` instructions, which // means `wmul` can be implemented with only two instructions. #[allow(unused_macros)] macro_rules! wmul_impl_16 { ($ty:ident, $intrinsic:ident, $mulhi:ident, $mullo:ident) => { impl WideningMultiply for $ty { type Output = ($ty, $ty); #[inline(always)] fn wmul(self, x: $ty) -> Self::Output { let b = $intrinsic::from_bits(x); let a = $intrinsic::from_bits(self); let hi = $ty::from_bits(unsafe { $mulhi(a, b) }); let lo = $ty::from_bits(unsafe { $mullo(a, b) }); (hi, lo) } } }; } #[cfg(target_feature = "sse2")] wmul_impl_16! { u16x4, __m64, _mm_mulhi_pu16, _mm_mullo_pi16 } #[cfg(target_feature = "sse4.2")] wmul_impl_16! { u16x8, __m128i, _mm_mulhi_epu16, _mm_mullo_epi16 } #[cfg(target_feature = "avx2")] wmul_impl_16! { u16x16, __m256i, _mm256_mulhi_epu16, _mm256_mullo_epi16 } // FIXME: there are no `__m512i` types in stdsimd yet, so `wmul::<u16x32>` // cannot use the same implementation. wmul_impl! { (u32x2, u64x2), (u32x4, u64x4), (u32x8, u64x8),, 32 } // TODO: optimize, this seems to seriously slow things down wmul_impl_large! { (u8x64,) u8, 4 } wmul_impl_large! { (u16x32,) u16, 8 } wmul_impl_large! { (u32x16,) u32, 16 } wmul_impl_large! { (u64x2, u64x4, u64x8,) u64, 32 } } #[cfg(all(feature = "simd_support", feature = "nightly"))] pub use self::simd_wmul::*; /// Helper trait when dealing with scalar and SIMD floating point types. pub(crate) trait FloatSIMDUtils { // `PartialOrd` for vectors compares lexicographically. We want to compare all // the individual SIMD lanes instead, and get the combined result over all // lanes. This is possible using something like `a.lt(b).all()`, but we // implement it as a trait so we can write the same code for `f32` and `f64`. // Only the comparison functions we need are implemented. fn all_lt(self, other: Self) -> bool; fn all_le(self, other: Self) -> bool; fn all_finite(self) -> bool; type Mask; fn finite_mask(self) -> Self::Mask; fn gt_mask(self, other: Self) -> Self::Mask; fn ge_mask(self, other: Self) -> Self::Mask; // Decrease all lanes where the mask is `true` to the next lower value // representable by the floating-point type. At least one of the lanes // must be set. fn decrease_masked(self, mask: Self::Mask) -> Self; // Convert from int value. Conversion is done while retaining the numerical // value, not by retaining the binary representation. type UInt; fn cast_from_int(i: Self::UInt) -> Self; } /// Implement functions available in std builds but missing from core primitives #[cfg(not(std))] pub(crate) trait Float : Sized { fn is_nan(self) -> bool; fn is_infinite(self) -> bool; fn is_finite(self) -> bool; } /// Implement functions on f32/f64 to give them APIs similar to SIMD types pub(crate) trait FloatAsSIMD : Sized { #[inline(always)] fn lanes() -> usize { 1 } #[inline(always)] fn splat(scalar: Self) -> Self { scalar } #[inline(always)] fn extract(self, index: usize) -> Self { debug_assert_eq!(index, 0); self } #[inline(always)] fn replace(self, index: usize, new_value: Self) -> Self { debug_assert_eq!(index, 0); new_value } } pub(crate) trait BoolAsSIMD : Sized { fn any(self) -> bool; fn all(self) -> bool; fn none(self) -> bool; } impl BoolAsSIMD for bool { #[inline(always)] fn any(self) -> bool { self } #[inline(always)] fn all(self) -> bool { self } #[inline(always)] fn none(self) -> bool { !self } } macro_rules! 
scalar_float_impl { ($ty:ident, $uty:ident) => { #[cfg(not(std))] impl Float for $ty { #[inline] fn is_nan(self) -> bool { self != self } #[inline] fn is_infinite(self) -> bool { self == ::core::$ty::INFINITY || self == ::core::$ty::NEG_INFINITY } #[inline] fn is_finite(self) -> bool { !(self.is_nan() || self.is_infinite()) } } impl FloatSIMDUtils for $ty { type Mask = bool; #[inline(always)] fn all_lt(self, other: Self) -> bool { self < other } #[inline(always)] fn all_le(self, other: Self) -> bool { self <= other } #[inline(always)] fn all_finite(self) -> bool { self.is_finite() } #[inline(always)] fn finite_mask(self) -> Self::Mask { self.is_finite() } #[inline(always)] fn gt_mask(self, other: Self) -> Self::Mask { self > other } #[inline(always)] fn ge_mask(self, other: Self) -> Self::Mask { self >= other } #[inline(always)] fn decrease_masked(self, mask: Self::Mask) -> Self { debug_assert!(mask, "At least one lane must be set"); <$ty>::from_bits(self.to_bits() - 1) } type UInt = $uty; #[inline] fn cast_from_int(i: Self::UInt) -> Self { i as $ty } } impl FloatAsSIMD for $ty {} } } scalar_float_impl!(f32, u32); scalar_float_impl!(f64, u64); #[cfg(feature="simd_support")] macro_rules! simd_impl { ($ty:ident, $f_scalar:ident, $mty:ident, $uty:ident) => { impl FloatSIMDUtils for $ty { type Mask = $mty; #[inline(always)] fn all_lt(self, other: Self) -> bool { self.lt(other).all() } #[inline(always)] fn all_le(self, other: Self) -> bool { self.le(other).all() } #[inline(always)] fn all_finite(self) -> bool { self.finite_mask().all() } #[inline(always)] fn finite_mask(self) -> Self::Mask { // This can possibly be done faster by checking bit patterns let neg_inf = $ty::splat(::core::$f_scalar::NEG_INFINITY); let pos_inf = $ty::splat(::core::$f_scalar::INFINITY); self.gt(neg_inf) & self.lt(pos_inf) } #[inline(always)] fn gt_mask(self, other: Self) -> Self::Mask { self.gt(other) } #[inline(always)] fn ge_mask(self, other: Self) -> Self::Mask { self.ge(other) } #[inline(always)] fn decrease_masked(self, mask: Self::Mask) -> Self { // Casting a mask into ints will produce all bits set for // true, and 0 for false. Adding that to the binary // representation of a float means subtracting one from // the binary representation, resulting in the next lower // value representable by $ty. This works even when the // current value is infinity. debug_assert!(mask.any(), "At least one lane must be set"); <$ty>::from_bits(<$uty>::from_bits(self) + <$uty>::from_bits(mask)) } type UInt = $uty; #[inline] fn cast_from_int(i: Self::UInt) -> Self { i.cast() } } } } #[cfg(feature="simd_support")] simd_impl! { f32x2, f32, m32x2, u32x2 } #[cfg(feature="simd_support")] simd_impl! { f32x4, f32, m32x4, u32x4 } #[cfg(feature="simd_support")] simd_impl! { f32x8, f32, m32x8, u32x8 } #[cfg(feature="simd_support")] simd_impl! { f32x16, f32, m32x16, u32x16 } #[cfg(feature="simd_support")] simd_impl! { f64x2, f64, m64x2, u64x2 } #[cfg(feature="simd_support")] simd_impl! { f64x4, f64, m64x4, u64x4 } #[cfg(feature="simd_support")] simd_impl! { f64x8, f64, m64x8, u64x8 } /// Calculates ln(gamma(x)) (natural logarithm of the gamma /// function) using the Lanczos approximation. /// /// The approximation expresses the gamma function as: /// `gamma(z+1) = sqrt(2*pi)*(z+g+0.5)^(z+0.5)*exp(-z-g-0.5)*Ag(z)` /// `g` is an arbitrary constant; we use the approximation with `g=5`. 
/// /// Noting that `gamma(z+1) = z*gamma(z)` and applying `ln` to both sides: /// `ln(gamma(z)) = (z+0.5)*ln(z+g+0.5)-(z+g+0.5) + ln(sqrt(2*pi)*Ag(z)/z)` /// /// `Ag(z)` is an infinite series with coefficients that can be calculated /// ahead of time - we use just the first 6 terms, which is good enough /// for most purposes. #[cfg(feature="std")] pub fn log_gamma(x: f64) -> f64 { // precalculated 6 coefficients for the first 6 terms of the series let coefficients: [f64; 6] = [ 76.18009172947146, -86.50532032941677, 24.01409824083091, -1.231739572450155, 0.1208650973866179e-2, -0.5395239384953e-5, ]; // (x+0.5)*ln(x+g+0.5)-(x+g+0.5) let tmp = x + 5.5; let log = (x + 0.5) * tmp.ln() - tmp; // the first few terms of the series for Ag(x) let mut a = 1.000000000190015; let mut denom = x; for coeff in &coefficients { denom += 1.0; a += coeff / denom; } // get everything together // a is Ag(x) // 2.5066... is sqrt(2pi) log + (2.5066282746310005 * a / x).ln() } /// Sample a random number using the Ziggurat method (specifically the /// ZIGNOR variant from Doornik 2005). Most of the arguments are /// directly from the paper: /// /// * `rng`: source of randomness /// * `symmetric`: whether this is a symmetric distribution, or one-sided with P(x < 0) = 0. /// * `X`: the $x_i$ abscissae. /// * `F`: precomputed values of the PDF at the $x_i$, (i.e. $f(x_i)$) /// * `F_DIFF`: precomputed values of $f(x_i) - f(x_{i+1})$ /// * `pdf`: the probability density function /// * `zero_case`: manual sampling from the tail when we chose the /// bottom box (i.e. i == 0) // the perf improvement (25-50%) is definitely worth the extra code // size from force-inlining. #[cfg(feature="std")] #[inline(always)] pub fn ziggurat<R: Rng + ?Sized, P, Z>( rng: &mut R, symmetric: bool, x_tab: ziggurat_tables::ZigTable, f_tab: ziggurat_tables::ZigTable, mut pdf: P, mut zero_case: Z) -> f64 where P: FnMut(f64) -> f64, Z: FnMut(&mut R, f64) -> f64 { use crate::distributions::float::IntoFloat; loop { // As an optimisation we re-implement the conversion to a f64. // From the remaining 12 most significant bits we use 8 to construct `i`. // This saves us generating a whole extra random number, while the added // precision of using 64 bits for f64 does not buy us much. let bits = rng.next_u64(); let i = bits as usize & 0xff; let u = if symmetric { // Convert to a value in the range [2,4) and substract to get [-1,1) // We can't convert to an open range directly, that would require // substracting `3.0 - EPSILON`, which is not representable. // It is possible with an extra step, but an open range does not // seem neccesary for the ziggurat algorithm anyway. (bits >> 12).into_float_with_exponent(1) - 3.0 } else { // Convert to a value in the range [1,2) and substract to get (0,1) (bits >> 12).into_float_with_exponent(0) - (1.0 - ::core::f64::EPSILON / 2.0) }; let x = u * x_tab[i]; let test_x = if symmetric { x.abs() } else {x}; // algebraically equivalent to |u| < x_tab[i+1]/x_tab[i] (or u < x_tab[i+1]/x_tab[i]) if test_x < x_tab[i + 1] { return x; } if i == 0 { return zero_case(rng, u); } // algebraically equivalent to f1 + DRanU()*(f0 - f1) < 1 if f_tab[i + 1] + (f_tab[i] - f_tab[i + 1]) * rng.gen::<f64>() < pdf(x) { return x; } } }
#[cfg(not(target_feature = "sse4.2"))] wmul_impl! { (u16x8, u32x8),, 16 }
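Two comments in utils.rs compress their math into prose; the same identities in display form may help. This is a transcription of those comments (with A_g(z) truncated to the six coefficients the code actually uses), nothing new.

% Widening multiply via half-width limbs (wmul_impl_large, half-width h,
% a = a_hi * 2^h + a_lo and likewise for b):
a \cdot b = a_{\mathrm{hi}} b_{\mathrm{hi}}\,2^{2h}
          + \left(a_{\mathrm{hi}} b_{\mathrm{lo}} + a_{\mathrm{lo}} b_{\mathrm{hi}}\right) 2^{h}
          + a_{\mathrm{lo}} b_{\mathrm{lo}}

% Lanczos approximation used by log_gamma (g = 5):
\Gamma(z+1) \approx \sqrt{2\pi}\,\left(z+g+\tfrac12\right)^{z+\frac12} e^{-(z+g+\frac12)} A_g(z),
\qquad
\ln\Gamma(z) \approx \left(z+\tfrac12\right)\ln\!\left(z+g+\tfrac12\right)
             - \left(z+g+\tfrac12\right)
             + \ln\!\frac{\sqrt{2\pi}\,A_g(z)}{z},
\qquad
A_g(z) \approx c_0 + \sum_{k=1}^{6}\frac{c_k}{z+k}

Here c_0 = 1.000000000190015 and c_1..c_6 are the six entries of the `coefficients` array in `log_gamma`.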
instagram_test.go
// Copyright 2013 The go-instagram AUTHORS. All rights reserved. // // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package instagram import ( "net/http" "net/http/httptest" "net/url" "testing" ) var ( // mux is the HTTP request multiplexer used with the test server. mux *http.ServeMux // client is the Instagram client being tested. client *Client // server is a test HTTP server used to provide mock API responses. server *httptest.Server ) func setup() { // test server mux = http.NewServeMux() server = httptest.NewServer(mux) // Instagram client configured to use test server client = NewClient(nil) client.BaseURL, _ = url.Parse(server.URL) } // teardown closes the test HTTP server. func teardown() { server.Close() } func testMethod(t *testing.T, r *http.Request, want string) { if want != r.Method { t.Errorf("Request method = %v, want %v", r.Method, want) } } type values map[string]string func testFormValues(t *testing.T, r *http.Request, values values) { for key, want := range values { if v := r.FormValue(key); v != want { t.Errorf("Request parameter %v = %v, want %v", key, v, want) } } } func TestNewClient(t *testing.T) { c := NewClient(nil) want := "https://api.instagram.com/v1/" if c.BaseURL.String() != want { t.Errorf("NewClient BaseURL = %v, want %v", c.BaseURL.String(), want) } want = "github.com/gedex/go-instagram v0.5" if c.UserAgent != want { t.Errorf("NewClient UserAgent = %v, want %v", c.UserAgent, want)
	}
}

func TestNewRequest(t *testing.T) {
	c := NewClient(nil)

	// set access_token
	c.AccessToken = "token"

	inURL, outURL := "foo", c.BaseURL.String()+"foo?access_token="+c.AccessToken
	req, _ := c.NewRequest("GET", inURL, "")

	// test that relative URL was expanded and access token appears in query string
	if req.URL.String() != outURL {
		t.Errorf("NewRequest(%v) URL = %v, want %v", inURL, req.URL, outURL)
	}

	// test that default user-agent is attached to the request
	userAgent := req.Header.Get("User-Agent")
	if c.UserAgent != userAgent {
		t.Errorf("NewRequest() User-Agent = %v, want %v", userAgent, c.UserAgent)
	}
}
reactive_relation_instance_builder_test.rs
use crate::tests::utils::r_string; use crate::{ EntityTypeBuilder, ReactiveEntityInstanceBuilder, ReactiveRelationInstanceBuilder, RelationTypeBuilder, }; use inexor_rgf_core_model::{DataType, PropertyInstanceGetter}; use serde_json::json; use uuid::Uuid; #[test] fn
() { let entity_type_name = r_string(); let entity_type = EntityTypeBuilder::new(entity_type_name.clone()).build(); let outbound_id = Uuid::new_v4(); let inbound_id = Uuid::new_v4(); let outbound = ReactiveEntityInstanceBuilder::from(entity_type.clone()) .id(outbound_id) .get(); let inbound = ReactiveEntityInstanceBuilder::from(entity_type.clone()) .id(inbound_id) .get(); let type_name = r_string(); let property_1_name = r_string(); let property_1_value = r_string(); let relation_instance = ReactiveRelationInstanceBuilder::new(outbound, type_name.clone(), inbound) .property(property_1_name.clone(), json!(property_1_value.clone())) .get(); assert_eq!(type_name, relation_instance.type_name); assert_eq!( property_1_value.clone().as_str(), relation_instance .get(property_1_name.clone()) .unwrap() .as_str() .unwrap() ); assert!(relation_instance.get(r_string()).is_none()); } #[test] fn reactive_relation_instance_builder_set_property_defaults_test() { let entity_type_name = r_string(); let entity_type = EntityTypeBuilder::new(entity_type_name.clone()).build(); let outbound_id = Uuid::new_v4(); let inbound_id = Uuid::new_v4(); let outbound = ReactiveEntityInstanceBuilder::from(entity_type.clone()) .id(outbound_id) .get(); let inbound = ReactiveEntityInstanceBuilder::from(entity_type.clone()) .id(inbound_id) .get(); let type_name = r_string(); let property_1_name = r_string(); let property_2_name = r_string(); let property_3_name = r_string(); let property_3_value = r_string(); let relation_type = RelationTypeBuilder::new( entity_type_name.clone(), type_name.clone(), entity_type_name.clone(), ) .property(property_1_name.clone(), DataType::String) .property(property_2_name.clone(), DataType::Number) .property(property_3_name.clone(), DataType::String) .build(); let relation_instance = ReactiveRelationInstanceBuilder::new(outbound, type_name.clone(), inbound) .set_properties_defaults(relation_type.clone()) .property(property_3_name.clone(), json!(property_3_value.clone())) .get(); assert_eq!(type_name, relation_instance.type_name); assert_eq!( DataType::String.default_value(), relation_instance .get(property_1_name.clone()) .unwrap() .as_str() .unwrap() ); assert_eq!( DataType::Number.default_value(), relation_instance .get(property_2_name.clone()) .unwrap() .as_i64() .unwrap() ); assert_eq!( property_3_value.clone().as_str(), relation_instance .get(property_3_name.clone()) .unwrap() .as_str() .unwrap() ); assert!(relation_instance.get(r_string()).is_none()); }
reactive_relation_instance_builder_test
structured_snippet_placeholder_field.pb.go
// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.27.1 // protoc v3.17.3 // source: google/ads/googleads/v7/enums/structured_snippet_placeholder_field.proto package enums import ( reflect "reflect" sync "sync" _ "google.golang.org/genproto/googleapis/api/annotations" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) const ( // Verify that this generated code is sufficiently up-to-date. _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) // Possible values for Structured Snippet placeholder fields. type StructuredSnippetPlaceholderFieldEnum_StructuredSnippetPlaceholderField int32 const ( // Not specified. StructuredSnippetPlaceholderFieldEnum_UNSPECIFIED StructuredSnippetPlaceholderFieldEnum_StructuredSnippetPlaceholderField = 0 // Used for return value only. Represents value unknown in this version. StructuredSnippetPlaceholderFieldEnum_UNKNOWN StructuredSnippetPlaceholderFieldEnum_StructuredSnippetPlaceholderField = 1 // Data Type: STRING. The category of snippet of your products/services. // Must match exactly one of the predefined structured snippets headers. // For a list, visit // https://developers.google.com/adwords/api/docs/appendix/structured-snippet-headers StructuredSnippetPlaceholderFieldEnum_HEADER StructuredSnippetPlaceholderFieldEnum_StructuredSnippetPlaceholderField = 2 // Data Type: STRING_LIST. Text values that describe your products/services. // All text must be family safe. Special or non-ASCII characters are not // permitted. A snippet can be at most 25 characters. StructuredSnippetPlaceholderFieldEnum_SNIPPETS StructuredSnippetPlaceholderFieldEnum_StructuredSnippetPlaceholderField = 3 ) // Enum value maps for StructuredSnippetPlaceholderFieldEnum_StructuredSnippetPlaceholderField. 
var ( StructuredSnippetPlaceholderFieldEnum_StructuredSnippetPlaceholderField_name = map[int32]string{ 0: "UNSPECIFIED", 1: "UNKNOWN", 2: "HEADER", 3: "SNIPPETS", } StructuredSnippetPlaceholderFieldEnum_StructuredSnippetPlaceholderField_value = map[string]int32{ "UNSPECIFIED": 0, "UNKNOWN": 1, "HEADER": 2, "SNIPPETS": 3, } ) func (x StructuredSnippetPlaceholderFieldEnum_StructuredSnippetPlaceholderField) Enum() *StructuredSnippetPlaceholderFieldEnum_StructuredSnippetPlaceholderField { p := new(StructuredSnippetPlaceholderFieldEnum_StructuredSnippetPlaceholderField) *p = x return p } func (x StructuredSnippetPlaceholderFieldEnum_StructuredSnippetPlaceholderField) String() string { return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } func (StructuredSnippetPlaceholderFieldEnum_StructuredSnippetPlaceholderField) Descriptor() protoreflect.EnumDescriptor { return file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_enumTypes[0].Descriptor() } func (StructuredSnippetPlaceholderFieldEnum_StructuredSnippetPlaceholderField) Type() protoreflect.EnumType { return &file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_enumTypes[0] } func (x StructuredSnippetPlaceholderFieldEnum_StructuredSnippetPlaceholderField) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } // Deprecated: Use StructuredSnippetPlaceholderFieldEnum_StructuredSnippetPlaceholderField.Descriptor instead. func (StructuredSnippetPlaceholderFieldEnum_StructuredSnippetPlaceholderField) EnumDescriptor() ([]byte, []int) { return file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_rawDescGZIP(), []int{0, 0} } // Values for Structured Snippet placeholder fields. type StructuredSnippetPlaceholderFieldEnum struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *StructuredSnippetPlaceholderFieldEnum) Reset() { *x = StructuredSnippetPlaceholderFieldEnum{} if protoimpl.UnsafeEnabled { mi := &file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *StructuredSnippetPlaceholderFieldEnum) String() string { return protoimpl.X.MessageStringOf(x) } func (*StructuredSnippetPlaceholderFieldEnum) ProtoMessage() {} func (x *StructuredSnippetPlaceholderFieldEnum) ProtoReflect() protoreflect.Message { mi := &file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use StructuredSnippetPlaceholderFieldEnum.ProtoReflect.Descriptor instead. 
func (*StructuredSnippetPlaceholderFieldEnum) Descriptor() ([]byte, []int) { return file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_rawDescGZIP(), []int{0} } var File_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto protoreflect.FileDescriptor var file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_rawDesc = []byte{ 0x0a, 0x48, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x37, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x5f, 0x73, 0x6e, 0x69, 0x70, 0x70, 0x65, 0x74, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x37, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x84, 0x01, 0x0a, 0x25, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x53, 0x6e, 0x69, 0x70, 0x70, 0x65, 0x74, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x75, 0x6d, 0x22, 0x5b, 0x0a, 0x21, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x53, 0x6e, 0x69, 0x70, 0x70, 0x65, 0x74, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x4e, 0x49, 0x50, 0x50, 0x45, 0x54, 0x53, 0x10, 0x03, 0x42, 0xfb, 0x01, 0x0a, 0x21, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x37, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x42, 0x26, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x53, 0x6e, 0x69, 0x70, 0x70, 0x65, 0x74, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x37, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x3b, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0xa2, 0x02, 0x03, 0x47, 0x41, 0x41, 0xaa, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x73, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x2e, 0x56, 0x37, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0xca, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x73, 0x5c, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x5c, 0x56, 0x37, 0x5c, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0xea, 0x02, 0x21, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x73, 0x3a, 0x3a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x3a, 0x3a, 0x56, 0x37, 0x3a, 0x3a, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x62, 0x06, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_rawDescOnce sync.Once file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_rawDescData = file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_rawDesc ) func file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_rawDescGZIP() []byte { file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_rawDescOnce.Do(func() { file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_rawDescData) }) return file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_rawDescData } var file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_goTypes = []interface{}{ (StructuredSnippetPlaceholderFieldEnum_StructuredSnippetPlaceholderField)(0), // 0: google.ads.googleads.v7.enums.StructuredSnippetPlaceholderFieldEnum.StructuredSnippetPlaceholderField (*StructuredSnippetPlaceholderFieldEnum)(nil), // 1: google.ads.googleads.v7.enums.StructuredSnippetPlaceholderFieldEnum } var file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_depIdxs = []int32{ 0, // [0:0] is the sub-list for method output_type 0, // [0:0] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name } func init() { file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_init() } func file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_init() { if File_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto != nil { return } if !protoimpl.UnsafeEnabled { file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StructuredSnippetPlaceholderFieldEnum); i { case 0: return &v.state case 1: return &v.sizeCache
default: return nil } } } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_rawDesc, NumEnums: 1, NumMessages: 1, NumExtensions: 0, NumServices: 0, }, GoTypes: file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_goTypes, DependencyIndexes: file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_depIdxs, EnumInfos: file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_enumTypes, MessageInfos: file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_msgTypes, }.Build() File_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto = out.File file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_rawDesc = nil file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_goTypes = nil file_google_ads_googleads_v7_enums_structured_snippet_placeholder_field_proto_depIdxs = nil }
case 2: return &v.unknownFields
types.go
// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kubernetes import ( "context" dnsscheme "github.com/gardener/external-dns-management/pkg/client/dns/clientset/versioned/scheme" gardencoreclientset "github.com/gardener/gardener/pkg/client/core/clientset/versioned" gardencorescheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" gardenextensionsscheme "github.com/gardener/gardener/pkg/client/extensions/clientset/versioned/scheme" gardenclientset "github.com/gardener/gardener/pkg/client/garden/clientset/versioned" gardenscheme "github.com/gardener/gardener/pkg/client/garden/clientset/versioned/scheme" machineclientset "github.com/gardener/gardener/pkg/client/machine/clientset/versioned" machinescheme "github.com/gardener/gardener/pkg/client/machine/clientset/versioned/scheme" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apiextensionsscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/discovery" kubernetesclientset "k8s.io/client-go/kubernetes" corescheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" apiregistrationclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" apiregistrationscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" "sigs.k8s.io/controller-runtime/pkg/client" ) const ( // CronJobs is a constant for a Kubernetes resource with the same name. CronJobs = "cronjobs" // CustomResourceDefinitions is a constant for a Kubernetes resource with the same name. CustomResourceDefinitions = "customresourcedefinitions" // DaemonSets is a constant for a Kubernetes resource with the same name. DaemonSets = "daemonsets" // Deployments is a constant for a Kubernetes resource with the same name. Deployments = "deployments" // Ingresses is a constant for a Kubernetes resource with the same name. Ingresses = "ingresses" // Jobs is a constant for a Kubernetes resource with the same name. Jobs = "jobs" // Namespaces is a constant for a Kubernetes resource with the same name. Namespaces = "namespaces" // PersistentVolumeClaims is a constant for a Kubernetes resource with the same name. PersistentVolumeClaims = "persistentvolumeclaims" // PersistentVolumes is a constant for a Kubernetes resource with the same name. PersistentVolumes = "persistentvolumes" // Pods is a constant for a Kubernetes resource with the same name. 
Pods = "pods" // ReplicaSets is a constant for a Kubernetes resource with the same name. ReplicaSets = "replicasets" // ReplicationControllers is a constant for a Kubernetes resource with the same name. ReplicationControllers = "replicationcontrollers" // Services is a constant for a Kubernetes resource with the same name. Services = "services" // StatefulSets is a constant for a Kubernetes resource with the same name. StatefulSets = "statefulsets" ) var ( // GardenScheme is the scheme used in the Garden cluster. GardenScheme = runtime.NewScheme() // SeedScheme is the scheme used in the Seed cluster. SeedScheme = runtime.NewScheme() // ShootScheme is the scheme used in the Shoot cluster. ShootScheme = runtime.NewScheme() // PlantScheme is the scheme used in the Plant cluster PlantScheme = runtime.NewScheme() // DefaultDeleteOptionFuncs use foreground propagation policy and grace period of 60 seconds. DefaultDeleteOptionFuncs = []client.DeleteOptionFunc{ client.PropagationPolicy(metav1.DeletePropagationForeground), client.GracePeriodSeconds(60), } propagationPolicy = metav1.DeletePropagationForeground gracePeriodSeconds = int64(60) defaultDeleteOptions = metav1.DeleteOptions{ PropagationPolicy: &propagationPolicy, GracePeriodSeconds: &gracePeriodSeconds, } zero int64 backgroundDeletion = metav1.DeletePropagationBackground forceDeleteOptions = metav1.DeleteOptions{ GracePeriodSeconds: &zero, PropagationPolicy: &backgroundDeletion, } ) func init()
// Clientset is a struct containing the configuration for the respective Kubernetes // cluster, the collection of Kubernetes clients <Clientset> containing all REST clients // for the built-in Kubernetes API groups, and the Garden which is a REST clientset // for the Garden API group. // The RESTClient itself is a normal HTTP client for the respective Kubernetes cluster, // allowing requests to arbitrary URLs. // The version string contains only the major/minor part in the form <major>.<minor>. type Clientset struct { config *rest.Config restMapper meta.RESTMapper restClient rest.Interface applier ApplierInterface client client.Client kubernetes kubernetesclientset.Interface garden gardenclientset.Interface gardenCore gardencoreclientset.Interface machine machineclientset.Interface apiextension apiextensionsclientset.Interface apiregistration apiregistrationclientset.Interface version string } // Applier is a default implementation of the ApplyInterface. It applies objects with // by first checking whether they exist and then either creating / updating them (update happens // with a predefined merge logic). type Applier struct { client client.Client discovery discovery.CachedDiscoveryInterface } // MergeFunc determines how oldOj is merged into new oldObj. type MergeFunc func(newObj, oldObj *unstructured.Unstructured) // ApplierOptions contains options used by the Applier. type ApplierOptions struct { MergeFuncs map[schema.GroupKind]MergeFunc } // ApplierInterface is an interface which describes declarative operations to apply multiple // Kubernetes objects. type ApplierInterface interface { ApplyManifest(ctx context.Context, unstructured UnstructuredReader, options ApplierOptions) error } // Interface is used to wrap the interactions with a Kubernetes cluster // (which are performed with the help of kubernetes/client-go) in order to allow the implementation // of several Kubernetes versions. type Interface interface { RESTConfig() *rest.Config RESTMapper() meta.RESTMapper RESTClient() rest.Interface Client() client.Client Applier() ApplierInterface Kubernetes() kubernetesclientset.Interface Garden() gardenclientset.Interface GardenCore() gardencoreclientset.Interface Machine() machineclientset.Interface APIExtension() apiextensionsclientset.Interface APIRegistration() apiregistrationclientset.Interface // Namespaces // Deprecated: Use `Client()` and utils instead. CreateNamespace(*corev1.Namespace, bool) (*corev1.Namespace, error) // Deprecated: Use `Client()` and utils instead. GetNamespace(string) (*corev1.Namespace, error) // Deprecated: Use `Client()` and utils instead. ListNamespaces(metav1.ListOptions) (*corev1.NamespaceList, error) // Deprecated: Use `Client()` and utils instead. PatchNamespace(name string, body []byte) (*corev1.Namespace, error) // Deprecated: Use `Client()` and utils instead. DeleteNamespace(string) error // Secrets // Deprecated: Use `Client()` and utils instead. CreateSecret(string, string, corev1.SecretType, map[string][]byte, bool) (*corev1.Secret, error) // Deprecated: Use `Client()` and utils instead. CreateSecretObject(*corev1.Secret, bool) (*corev1.Secret, error) // Deprecated: Use `Client()` and utils instead. UpdateSecretObject(*corev1.Secret) (*corev1.Secret, error) // Deprecated: Use `Client()` and utils instead. ListSecrets(string, metav1.ListOptions) (*corev1.SecretList, error) // Deprecated: Use `Client()` and utils instead. GetSecret(string, string) (*corev1.Secret, error) // Deprecated: Use `Client()` and utils instead. 
DeleteSecret(string, string) error // ConfigMaps // Deprecated: Use `Client()` and utils instead. CreateConfigMap(string, string, map[string]string, bool) (*corev1.ConfigMap, error) // Deprecated: Use `Client()` and utils instead. UpdateConfigMap(string, string, map[string]string) (*corev1.ConfigMap, error) // Deprecated: Use `Client()` and utils instead. GetConfigMap(string, string) (*corev1.ConfigMap, error) // Deprecated: Use `Client()` and utils instead. DeleteConfigMap(string, string) error // Services // Deprecated: Use `Client()` and utils instead. GetService(string, string) (*corev1.Service, error) // Deprecated: Use `Client()` and utils instead. DeleteService(string, string) error // Deployments // Deprecated: Use `Client()` and utils instead. GetDeployment(string, string) (*appsv1.Deployment, error) // Deprecated: Use `Client()` and utils instead. ListDeployments(string, metav1.ListOptions) (*appsv1.DeploymentList, error) // Deprecated: Use `Client()` and utils instead. PatchDeployment(string, string, []byte) (*appsv1.Deployment, error) // Deprecated: Use `Client()` and utils instead. DeleteDeployment(string, string) error // StatefulSets // Deprecated: Use `Client()` and utils instead. ListStatefulSets(string, metav1.ListOptions) (*appsv1.StatefulSetList, error) // Deprecated: Use `Client()` and utils instead. DeleteStatefulSet(string, string) error // DaemonSets // Deprecated: Use `Client()` and utils instead. DeleteDaemonSet(string, string) error // Jobs // Deprecated: Use `Client()` and utils instead. GetJob(string, string) (*batchv1.Job, error) // Deprecated: Use `Client()` and utils instead. DeleteJob(string, string) error // Deprecated: Use `Client()` and utils instead. DeleteCronJob(string, string) error // Pods // Deprecated: Use `Client()` and utils instead. GetPod(string, string) (*corev1.Pod, error) // Deprecated: Use `Client()` and utils instead. ListPods(string, metav1.ListOptions) (*corev1.PodList, error) // Deprecated: Use `Client()` and utils instead. ForwardPodPort(string, string, int, int) (chan struct{}, error) CheckForwardPodPort(string, string, int, int) error // Deprecated: Use `Client()` and utils instead. DeletePod(string, string) error // Deprecated: Use `Client()` and utils instead. DeletePodForcefully(string, string) error // Nodes // Deprecated: Use `Client()` and utils instead. ListNodes(metav1.ListOptions) (*corev1.NodeList, error) // RBAC // Deprecated: Use `Client()` and utils instead. ListRoleBindings(string, metav1.ListOptions) (*rbacv1.RoleBindingList, error) // Deprecated: Use `Client()` and utils instead. DeleteClusterRole(name string) error // Deprecated: Use `Client()` and utils instead. DeleteClusterRoleBinding(name string) error // Deprecated: Use `Client()` and utils instead. DeleteRoleBinding(namespace, name string) error // ServiceAccounts // Deprecated: Use `Client()` and utils instead. DeleteServiceAccount(namespace, name string) error // HorizontalPodAutoscalers // Deprecated: Use `Client()` and utils instead. DeleteHorizontalPodAutoscaler(namespace, name string) error // Ingresses // Deprecated: Use `Client()` and utils instead. DeleteIngress(namespace, name string) error // NetworkPolicies // Deprecated: Use `Client()` and utils instead. DeleteNetworkPolicy(namespace, name string) error Version() string }
{ gardenSchemeBuilder := runtime.NewSchemeBuilder( corescheme.AddToScheme, gardenscheme.AddToScheme, gardencorescheme.AddToScheme, ) utilruntime.Must(gardenSchemeBuilder.AddToScheme(GardenScheme)) seedSchemeBuilder := runtime.NewSchemeBuilder( corescheme.AddToScheme, machinescheme.AddToScheme, dnsscheme.AddToScheme, gardenextensionsscheme.AddToScheme, ) utilruntime.Must(seedSchemeBuilder.AddToScheme(SeedScheme)) shootSchemeBuilder := runtime.NewSchemeBuilder( corescheme.AddToScheme, apiextensionsscheme.AddToScheme, apiregistrationscheme.AddToScheme, ) utilruntime.Must(shootSchemeBuilder.AddToScheme(ShootScheme)) plantSchemeBuilder := runtime.NewSchemeBuilder( corescheme.AddToScheme, gardencorescheme.AddToScheme, ) utilruntime.Must(plantSchemeBuilder.AddToScheme(PlantScheme)) }
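// A minimal usage sketch, assuming the old controller-runtime DeleteOptionFunc-style
// client API that the exported slice above is shaped for; the function and namespace
// names here are illustrative only, not part of the original file.
func deleteNamespaceGracefully(ctx context.Context, c client.Client, ns *corev1.Namespace) error {
	// Foreground propagation with a 60-second grace period, as encoded in DefaultDeleteOptionFuncs.
	return c.Delete(ctx, ns, DefaultDeleteOptionFuncs...)
}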
generate.js
#!/usr/bin/env npx jbash set("-x"); set("-e"); const rootFolder = require("path").join(__dirname, "../"); const specFilename = `spec-v1-swagger.json`; const swaggerConfigFilename = `config.json`; cd(rootFolder);
// Update config file with latest package info const package = require("../package.json"); swaggerConfig = require(`./${swaggerConfigFilename}`); swaggerConfig.npmName = package.name; swaggerConfig.npmVersion = package.version; writeFile( `./.swagger-codegen/${swaggerConfigFilename}`, JSON.stringify(swaggerConfig, null, 2) ); // Copy ignore file to src/ (workaround for ignore-file-override option not working) exec(`cp ./.swagger-codegen/.swagger-codegen-ignore ./src`); // Share the current folder with docker, and then run the typescript-fetch generator, pointing to the given template const codegenVersion = "2.4.17"; exec(`docker pull swaggerapi/swagger-codegen-cli:${codegenVersion} && docker run -u \`id -u\` --rm -v ${rootFolder}:/local swaggerapi/swagger-codegen-cli:${codegenVersion} generate \ -i "/local/.swagger-codegen/${specFilename}" \ -l "typescript-fetch" \ -c "/local/.swagger-codegen/${swaggerConfigFilename}" \ -t "/local/.swagger-codegen/templates" \ -o "/local/src"`); // Move VERSION file out of src/ and into root .swagger-codegen folder for consistent organization exec( "mv ./src/.swagger-codegen/VERSION ./.swagger-codegen/ && rm -rf ./src/.swagger-codegen" ); // Remove ignore file from src/ exec(`rm ./src/.swagger-codegen-ignore`);
exec( `wget http://127.0.0.1:8000/swagger.json -O ./.swagger-codegen/${specFilename}` );
register.go
/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1beta3 import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/registered" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/runtime" ) // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = unversioned.GroupVersion{Group: "", Version: "v1beta3"} // Codec encodes internal objects to the v1beta3 scheme var Codec = runtime.CodecFor(api.Scheme, SchemeGroupVersion.String()) func init() { // Check if v1beta3 is in the list of supported API versions. if !registered.IsRegisteredAPIGroupVersion(SchemeGroupVersion) { return } // Register the API. addKnownTypes() addConversionFuncs() addDefaultingFuncs() } // Adds the list of known types to api.Scheme. func
() { api.Scheme.AddKnownTypes(SchemeGroupVersion, &Pod{}, &PodList{}, &PodStatusResult{}, &PodTemplate{}, &PodTemplateList{}, &ReplicationController{}, &ReplicationControllerList{}, &Service{}, &ServiceList{}, &Endpoints{}, &EndpointsList{}, &Node{}, &NodeList{}, &Binding{}, &Event{}, &EventList{}, &List{}, &LimitRange{}, &LimitRangeList{}, &ResourceQuota{}, &ResourceQuotaList{}, &Namespace{}, &NamespaceList{}, &Secret{}, &SecretList{}, &ServiceAccount{}, &ServiceAccountList{}, &PersistentVolume{}, &PersistentVolumeList{}, &PersistentVolumeClaim{}, &PersistentVolumeClaimList{}, &DeleteOptions{}, &ListOptions{}, &PodAttachOptions{}, &PodLogOptions{}, &PodExecOptions{}, &PodProxyOptions{}, &ComponentStatus{}, &ComponentStatusList{}, &SerializedReference{}, &RangeAllocation{}, &SecurityContextConstraints{}, &SecurityContextConstraintsList{}, ) // Legacy names are supported api.Scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("Minion"), &Node{}) api.Scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("MinionList"), &NodeList{}) // Add common types api.Scheme.AddKnownTypes(SchemeGroupVersion, &unversioned.Status{}) } func (*Pod) IsAnAPIObject() {} func (*PodList) IsAnAPIObject() {} func (*PodStatusResult) IsAnAPIObject() {} func (*PodTemplate) IsAnAPIObject() {} func (*PodTemplateList) IsAnAPIObject() {} func (*ReplicationController) IsAnAPIObject() {} func (*ReplicationControllerList) IsAnAPIObject() {} func (*Service) IsAnAPIObject() {} func (*ServiceList) IsAnAPIObject() {} func (*Endpoints) IsAnAPIObject() {} func (*EndpointsList) IsAnAPIObject() {} func (*Node) IsAnAPIObject() {} func (*NodeList) IsAnAPIObject() {} func (*Binding) IsAnAPIObject() {} func (*Event) IsAnAPIObject() {} func (*EventList) IsAnAPIObject() {} func (*List) IsAnAPIObject() {} func (*LimitRange) IsAnAPIObject() {} func (*LimitRangeList) IsAnAPIObject() {} func (*ResourceQuota) IsAnAPIObject() {} func (*ResourceQuotaList) IsAnAPIObject() {} func (*Namespace) IsAnAPIObject() {} func (*NamespaceList) IsAnAPIObject() {} func (*Secret) IsAnAPIObject() {} func (*SecretList) IsAnAPIObject() {} func (*ServiceAccount) IsAnAPIObject() {} func (*ServiceAccountList) IsAnAPIObject() {} func (*PersistentVolume) IsAnAPIObject() {} func (*PersistentVolumeList) IsAnAPIObject() {} func (*PersistentVolumeClaim) IsAnAPIObject() {} func (*PersistentVolumeClaimList) IsAnAPIObject() {} func (*DeleteOptions) IsAnAPIObject() {} func (*ListOptions) IsAnAPIObject() {} func (*PodAttachOptions) IsAnAPIObject() {} func (*PodLogOptions) IsAnAPIObject() {} func (*PodExecOptions) IsAnAPIObject() {} func (*PodProxyOptions) IsAnAPIObject() {} func (*ComponentStatus) IsAnAPIObject() {} func (*ComponentStatusList) IsAnAPIObject() {} func (*SerializedReference) IsAnAPIObject() {} func (*RangeAllocation) IsAnAPIObject() {} func (*SecurityContextConstraints) IsAnAPIObject() {} func (*SecurityContextConstraintsList) IsAnAPIObject() {}
addKnownTypes
node_integration_test.go
// +build integration
import ( "fmt" "os" "testing" "github.com/elastic/beats/libbeat/tests/compose" mbtest "github.com/elastic/beats/metricbeat/mb/testing" ) func TestData(t *testing.T) { compose.EnsureUp(t, "rabbitmq") f := mbtest.NewEventsFetcher(t, getConfig()) err := mbtest.WriteEvents(f, t) if err != nil { t.Fatal("write", err) } } func getConfig() map[string]interface{} { return map[string]interface{}{ "module": "rabbitmq", "metricsets": []string{"node"}, "hosts": getTestRabbitMQHost(), "username": getTestRabbitMQUsername(), "password": getTestRabbitMQPassword(), } } const ( rabbitmqDefaultHost = "localhost" rabbitmqDefaultPort = "15672" rabbitmqDefaultUsername = "guest" rabbitmqDefaultPassword = "guest" ) func getTestRabbitMQHost() string { return fmt.Sprintf("%v:%v", getenv("RABBITMQ_HOST", rabbitmqDefaultHost), getenv("RABBITMQ_PORT", rabbitmqDefaultPort), ) } func getTestRabbitMQUsername() string { return getenv("RABBITMQ_USERNAME", rabbitmqDefaultUsername) } func getTestRabbitMQPassword() string { return getenv("RABBITMQ_PASSWORD", rabbitmqDefaultPassword) } func getenv(name, defaultValue string) string { return strDefault(os.Getenv(name), defaultValue) } func strDefault(a, defaults string) string { if len(a) == 0 { return defaults } return a }
package node
fortios_log_fortiguard_override_filter.py
#!/usr/bin/python from __future__ import (absolute_import, division, print_function) # Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_log_fortiguard_override_filter short_description: Override filters for FortiCloud in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS device by allowing the user to set and modify the log_fortiguard feature and override_filter category. Examples include all parameters and values, which need to be adjusted to datasources before usage. Tested with FOS v6.0.2 version_added: "2.8" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires the fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate ip address. required: true username:
description: - FortiOS or FortiGate password. default: "" vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol type: bool default: true log_fortiguard_override_filter: description: - Override filters for FortiCloud. default: null suboptions: anomaly: description: - Enable/disable anomaly logging. choices: - enable - disable dlp-archive: description: - Enable/disable DLP archive logging. choices: - enable - disable dns: description: - Enable/disable detailed DNS event logging. choices: - enable - disable filter: description: - FortiCloud log filter. filter-type: description: - Include/exclude logs that match the filter. choices: - include - exclude forward-traffic: description: - Enable/disable forward traffic logging. choices: - enable - disable gtp: description: - Enable/disable GTP messages logging. choices: - enable - disable local-traffic: description: - Enable/disable local in or out traffic logging. choices: - enable - disable multicast-traffic: description: - Enable/disable multicast traffic logging. choices: - enable - disable netscan-discovery: description: - Enable/disable netscan discovery event logging. choices: netscan-vulnerability: description: - Enable/disable netscan vulnerability event logging. choices: severity: description: - Lowest severity level to log. choices: - emergency - alert - critical - error - warning - notification - information - debug sniffer-traffic: description: - Enable/disable sniffer traffic logging. choices: - enable - disable ssh: description: - Enable/disable SSH logging. choices: - enable - disable voip: description: - Enable/disable VoIP logging. choices: - enable - disable ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" tasks: - name: Override filters for FortiCloud. 
fortios_log_fortiguard_override_filter: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" https: "False" log_fortiguard_override_filter: anomaly: "enable" dlp-archive: "enable" dns: "enable" filter: "<your_own_value>" filter-type: "include" forward-traffic: "enable" gtp: "enable" local-traffic: "enable" multicast-traffic: "enable" netscan-discovery: "<your_own_value>" netscan-vulnerability: "<your_own_value>" severity: "emergency" sniffer-traffic: "enable" ssh: "enable" voip: "enable" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule def login(data, fos): host = data['host'] username = data['username'] password = data['password'] fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else: fos.https('on') fos.login(host, username, password) def filter_log_fortiguard_override_filter_data(json): option_list = ['anomaly', 'dlp-archive', 'dns', 'filter', 'filter-type', 'forward-traffic', 'gtp', 'local-traffic', 'multicast-traffic', 'netscan-discovery', 'netscan-vulnerability', 'severity', 'sniffer-traffic', 'ssh', 'voip'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def log_fortiguard_override_filter(data, fos): vdom = data['vdom'] log_fortiguard_override_filter_data = data['log_fortiguard_override_filter'] filtered_data = filter_log_fortiguard_override_filter_data(log_fortiguard_override_filter_data) return fos.set('log.fortiguard', 'override-filter', data=filtered_data, vdom=vdom) def is_successful_status(status): return status['status'] == "success" or \ status['http_method'] == "DELETE" and status['http_status'] == 404 def fortios_log_fortiguard(data, fos): login(data, fos) if data['log_fortiguard_override_filter']: resp = log_fortiguard_override_filter(data, fos) fos.logout() return not is_successful_status(resp), \ resp['status'] == "success", \ resp def main(): fields = { "host": {"required": True, "type": "str"}, "username": {"required": True, "type": "str"}, "password": {"required": False, "type": "str", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, "https": {"required": False, "type": "bool", "default": True}, "log_fortiguard_override_filter": { "required": False, "type": "dict", "options": { "anomaly": {"required": 
False, "type": "str", "choices": ["enable", "disable"]}, "dlp-archive": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "dns": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "filter": {"required": False, "type": "str"}, "filter-type": {"required": False, "type": "str", "choices": ["include", "exclude"]}, "forward-traffic": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "gtp": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "local-traffic": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "multicast-traffic": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "netscan-discovery": {"required": False, "type": "str", "choices": []}, "netscan-vulnerability": {"required": False, "type": "str", "choices": []}, "severity": {"required": False, "type": "str", "choices": ["emergency", "alert", "critical", "error", "warning", "notification", "information", "debug"]}, "sniffer-traffic": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "ssh": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "voip": {"required": False, "type": "str", "choices": ["enable", "disable"]} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is required") fos = FortiOSAPI() is_error, has_changed, result = fortios_log_fortiguard(module.params, fos) if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': main()
description: - FortiOS or FortiGate username. required: true password:
add-user.component.ts
import { Component, Input, OnInit } from "@angular/core"; import { NzDrawerRef, NzDrawerService } from "ng-zorro-antd"; import { Engine } from "../../interfaces/engine.interface"; import { User } from "../../interfaces/user.interface"; import { ManagementService } from "../../services/management/management.service"; import { CreateUserComponent } from "../create-user/create-user.component"; @Component({ selector: "app-add-user", templateUrl: "./add-user.component.html", styleUrls: ["./add-user.component.css"], }) export class AddUserComponent implements OnInit { /** * Engine selected Users will be added to */ @Input() public engine: Engine; /** * Status of the http request/s adding Users to Engine */ public addUsersStatus: "idle" | "pending" | "success" | "error" = "idle"; public selectedUsers: (string | number)[] = []; public availableUsers: User[] = []; constructor( private management: ManagementService, private drawerRef: NzDrawerRef, private drawer: NzDrawerService, ) {} ngOnInit(): void { this.getAvailableUsers(); console.log(this); } public getAvailableUsers(): void { this.management.getUsers().subscribe({ next: (users) => { this.availableUsers = users.filter((user) => {
console.error(error); }, }); } /** * Opens a drawer where the user can create a new User */ public createNewUser(): void { this.drawerRef.nzOffsetX = Number(this.drawerRef.nzOffsetX) + 175; const drawerRef = this.drawer.create<CreateUserComponent, { type: string }, string>({ nzTitle: "Create New User", nzContent: CreateUserComponent, nzWidth: "350px", }); // Update available Users after a new one was created drawerRef.afterClose.subscribe(() => { this.drawerRef.nzOffsetX = Number(this.drawerRef.nzOffsetX) - 175; this.getAvailableUsers(); }); } /** * Adds the selected Users to the Engine */ public addUsers(): void { this.addUsersStatus = "pending"; this.management.addUsersToEngines(this.selectedUsers, [this.engine.id]).subscribe({ next: () => { this.addUsersStatus = "success"; this.close(); }, error: (error) => { this.addUsersStatus = "error"; console.error(error); }, }); } /** * Closes the current drawer */ public close(): void { this.drawerRef.close(); } }
return !this.engine.users.some((u) => u.id === user.id); }); }, error: (error) => {
power_law.py
import numpy as np from pyHalo.Rendering.MassFunctions.mass_function_utilities import integrate_power_law_analytic from pyHalo.Rendering.MassFunctions.mass_function_utilities import WDM_suppression class GeneralPowerLaw(object): """ This class handles computations of a double power law mass function of the form dn/dm = m^x * (1 + (a * m_c / m)^b)^c where a, b, and c are constants, and m_c is a characteristic mass scale. The keywords for a, b, c are a_wdm, b_wdm, and c_wdm, respectively Lovell 2020 fit this mass function to simulations of Warm Dark Matter cosmologies and find (a, b, c) = (2.3, 0.8, -1) for central halos and (4.2, 2.5, -0.2) for subhalos """ def __init__(self, log_mlow, log_mhigh, power_law_index, draw_poisson, normalization, log_mc, a_wdm, b_wdm, c_wdm): if a_wdm is None: assert b_wdm is None, 'If one of a_wdm, b_wdm, or c_wdm is not specified (None), all parameters must be None' assert c_wdm is None, 'If one of a_wdm, b_wdm, or c_wdm is not specified (None), all parameters must be None' else: assert b_wdm is not None, 'Must specify values for all three of a_wdm, b_wdm, c_wdm' assert c_wdm is not None, 'Must specify values for all three of a_wdm, b_wdm, c_wdm' if b_wdm is None: assert a_wdm is None, 'If one of a_wdm, b_wdm, or c_wdm is not specified (None), all parameters must be None' assert c_wdm is None, 'If one of a_wdm, b_wdm, or c_wdm is not specified (None), all parameters must be None' else: assert a_wdm is not None, 'Must specify values for all three of a_wdm, b_wdm, c_wdm' assert c_wdm is not None, 'Must specify values for all three of a_wdm, b_wdm, c_wdm' if c_wdm is None: assert a_wdm is None, 'If one of a_wdm, b_wdm, or c_wdm is not specified (None), all parameters must be None' assert b_wdm is None, 'If one of a_wdm, b_wdm, or c_wdm is not specified (None), all parameters must be None' else: assert a_wdm is not None, 'Must specify values for all three of a_wdm, b_wdm, c_wdm' assert b_wdm is not None, 'Must specify values for all three of a_wdm, b_wdm, c_wdm' if normalization < 0: raise Exception('normalization cannot be < 0.') if c_wdm is not None and c_wdm > 0: raise ValueError('c_wdm should be a negative number (otherwise mass function gets steeper (unphysical)') if a_wdm is not None and a_wdm < 0: raise ValueError('a_wdm should be a positive number for suppression factor: ' '( 1 + (a_wdm * m/m_c)^b_wdm)^c_wdm') if np.any([a_wdm is None, b_wdm is None, c_wdm is None]): assert log_mc is None, 'If log_mc is specified, must also specify kwargs for a_wdm, b_wdm, c_wdm.' \ '(See documentation in pyHalo/Rendering/MassFunctions/Powerlaw/broken_powerlaw' self._log_mc = log_mc self._a_wdm = a_wdm self._b_wdm = b_wdm self._c_wdm = c_wdm self.draw_poisson = draw_poisson self._index = power_law_index self._mL = 10 ** log_mlow self._mH = 10 ** log_mhigh self._nhalos_mean_unbroken = integrate_power_law_analytic(normalization, 10 ** log_mlow, 10 ** log_mhigh, 0, power_law_index) def draw(self): """ Draws samples from a double power law distribution between mL and mH of the form m ^ power_law_index * (1 + (a*mc / m)^b )^c Physically, the second term multiplying m^power_law_index can be a suppression in the mass function on small scales. 
:param draw_poisson: :param _index: :param _mH: :param _mL: :param n_draw: :return: """ m = self._sample(self.draw_poisson, self._index, self._mH, self._mL, self._nhalos_mean_unbroken) if len(m) == 0 or self._log_mc is None: return m factor = WDM_suppression(m, 10 ** self._log_mc, self._a_wdm, self._b_wdm, self._c_wdm) u = np.random.rand(int(len(m))) inds = np.where(u < factor) return m[inds] def
(self, draw_poisson, index, mH, mL, n_draw): """ Draws samples from a power law distribution between mL and mH :param draw_poisson: :param _index: :param _mH: :param _mL: :param n_draw: :return: """ if draw_poisson: N = np.random.poisson(n_draw) else: N = int(round(np.round(n_draw))) x = np.random.rand(N) if index == -1: norm = np.log(mH / mL) X = mL * np.exp(norm * x) else: X = (x * (mH ** (1 + index) - mL ** (1 + index)) + mL ** ( 1 + index)) ** ( (1 + index) ** -1) return np.array(X)
_sample
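# A minimal usage sketch, assuming the class above is importable; the numbers are
# illustrative only, with (a_wdm, b_wdm, c_wdm) = (2.3, 0.8, -1.0) taken from the
# central-halo fit quoted in the class docstring.
mass_function = GeneralPowerLaw(log_mlow=6.0, log_mhigh=10.0, power_law_index=-1.9,
                                draw_poisson=True, normalization=1e9, log_mc=7.5,
                                a_wdm=2.3, b_wdm=0.8, c_wdm=-1.0)
halo_masses = mass_function.draw()  # sampled masses with the WDM suppression applied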
serializers.py
from rest_framework import serializers from scripts.models import ScriptVersion
# Serializers define the API representation. class ScriptSerializer(serializers.ModelSerializer): name = serializers.CharField(source="script.name") score = serializers.ReadOnlyField(source="votes.count") class Meta: model = ScriptVersion fields = ["pk", "name", "version", "content", "score"]
helperSpec.js
'use strict'; /*jshint -W117 */ /*jshint globalstrict: true*/ /* jasmine specs for directives go here */ function replacer(k, v) { if (typeof v === 'function') { v = v.toString(); } else if (window.File && v instanceof File) { v = '[File]'; } else if (window.FileList && v instanceof FileList) { v = '[FileList]'; } return v; } describe('Method: olHelpers.createStyle', function() { var _olHelpers; beforeEach(module('openlayers-directive')); beforeEach(inject(function(olHelpers) { _olHelpers = olHelpers; })); beforeEach(function() { jasmine.addMatchers({ toBeJsonEqual: function() { return { compare: function(actual, expected) { var one = JSON.stringify(actual, replacer).replace(/(\\t|\\n)/g, ''); var two = JSON.stringify(expected, replacer).replace(/(\\t|\\n)/g, ''); return one === two; } }; } }); }); it('makes marker icon style', function() { var style = { image: { icon: { anchor: [0.5, 1], anchorXUnits: 'fraction', anchorYUnits: 'fraction', opacity: 0.90,
src: 'images/map-marker.png' } } }; expect(_olHelpers.createStyle(style)) .toEqual(new ol.style.Style({ fill: null, stroke: null, image: new ol.style.Icon(style.image.icon) })); }); it('makes drawing feature style', function() { var style = { fill: { color: 'rgba(0, 0, 255, 0.6)' }, stroke: { color: 'white', width: 3 } }; expect(_olHelpers.createStyle(style)) .toEqual(new ol.style.Style({ fill: new ol.style.Fill({ color: style.fill.color }), stroke: new ol.style.Stroke({ color: style.stroke.color, width: style.stroke.width }), image: null })); }); xit('makes circle feature style', function() { var style = { image: { circle: { radius: 8, fill: { color: 'rgba(0, 0, 255, 0.6)' }, stroke: { color: 'white', width: 3 } } } }; expect(_olHelpers.createStyle(style)) .toBeJsonEqual(new ol.style.Style({ fill: null, stroke: null, image: new ol.style.Circle({ radius: 8, fill: new ol.style.Fill({ color: 'rgba(0, 0, 255, 0.6)' }), stroke: new ol.style.Stroke({ color: 'white', width: 3 }) }) })); }); });
tests.rs
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ backup_types::transaction::{ backup::{TransactionBackupController, TransactionBackupOpt}, restore::{TransactionRestoreController, TransactionRestoreOpt}, }, storage::{local_fs::LocalFs, BackupStorage}, utils::{ backup_service_client::BackupServiceClient, test_utils::{start_local_backup_service, tmp_db_with_random_content}, GlobalBackupOpt, GlobalRestoreOpt, RocksdbOpt, }, }; use diem_config::config::RocksdbConfig; use diem_temppath::TempPath; use diem_types::transaction::Version; use diemdb::DiemDB; use std::{convert::TryInto, mem::size_of, sync::Arc}; use storage_interface::DbReader; use tokio::time::Duration; #[test] fn
() { let (_src_db_dir, src_db, blocks) = tmp_db_with_random_content(); let tgt_db_dir = TempPath::new(); tgt_db_dir.create_as_dir().unwrap(); let backup_dir = TempPath::new(); backup_dir.create_as_dir().unwrap(); let store: Arc<dyn BackupStorage> = Arc::new(LocalFs::new(backup_dir.path().to_path_buf())); let (rt, port) = start_local_backup_service(src_db); let client = Arc::new(BackupServiceClient::new(format!( "http://localhost:{}", port ))); let latest_version = blocks.last().unwrap().1.ledger_info().version(); let total_txns = blocks.iter().fold(0, |x, b| x + b.0.len()); assert_eq!(latest_version as usize + 1, total_txns); let txns = blocks .iter() .map(|(txns, _li)| txns) .flatten() .map(|txn_to_commit| txn_to_commit.transaction()) .collect::<Vec<_>>(); let max_chunk_size = txns .iter() .map(|t| bcs::to_bytes(t).unwrap().len()) .max() .unwrap() // biggest txn + 115 // size of a serialized TransactionInfo + size_of::<u32>(); // record len header let first_ver_to_backup = (total_txns / 4) as Version; let num_txns_to_backup = total_txns - first_ver_to_backup as usize; let target_version = first_ver_to_backup + total_txns as Version / 2; let num_txns_to_restore = (target_version - first_ver_to_backup + 1) as usize; let manifest_handle = rt .block_on( TransactionBackupController::new( TransactionBackupOpt { start_version: first_ver_to_backup, num_transactions: num_txns_to_backup, }, GlobalBackupOpt { max_chunk_size }, client, Arc::clone(&store), ) .run(), ) .unwrap(); rt.block_on( TransactionRestoreController::new( TransactionRestoreOpt { manifest_handle, replay_from_version: None, // max }, GlobalRestoreOpt { dry_run: false, db_dir: Some(tgt_db_dir.path().to_path_buf()), target_version: Some(target_version), rocksdb_opt: RocksdbOpt::default(), } .try_into() .unwrap(), store, None, /* epoch_history */ ) .run(), ) .unwrap(); // We don't write down any ledger infos when recovering transactions. State-sync needs to take // care of it before running consensus. The latest transactions are deemed "synced" instead of // "committed" most likely. let tgt_db = DiemDB::open( &tgt_db_dir, true, /* read_only */ None, /* pruner */ RocksdbConfig::default(), ) .unwrap(); assert_eq!( tgt_db .get_latest_transaction_info_option() .unwrap() .unwrap() .0, target_version, ); let recovered_transactions = tgt_db .get_transactions( first_ver_to_backup, num_txns_to_restore as u64, target_version, true, /* fetch_events */ ) .unwrap(); assert_eq!( recovered_transactions.transactions, txns.into_iter() .skip(first_ver_to_backup as usize) .take(num_txns_to_restore) .cloned() .collect::<Vec<_>>() ); assert_eq!( recovered_transactions.events.unwrap(), blocks .iter() .map(|(txns, _li)| { txns.iter() .map(|txn_to_commit| txn_to_commit.events().to_vec()) }) .flatten() .skip(first_ver_to_backup as usize) .take(num_txns_to_restore) .collect::<Vec<_>>() ); rt.shutdown_timeout(Duration::from_secs(1)); }
end_to_end
main.py
import tkinter as tk # Create GUI object app = tk.Tk() def
(): print('Submitted') # Button 1 btn1_text = tk.StringVar() btn1_label = tk.Label(app, text='Button Name 1', font=('bold', 14), pady=15) btn1_label.grid(row=0, column=0, sticky=tk.W) btn1_entry = tk.Entry(app, textvariable=btn1_text) btn1_entry.grid(row=0, column=1) # Button 2 btn2_text = tk.StringVar() btn2_label = tk.Label(app, text='Button Name 2', font=('bold', 14)) btn2_label.grid(row=0, column=2, sticky=tk.W) btn2_entry = tk.Entry(app, textvariable=btn2_text) btn2_entry.grid(row=0, column=3) # Button 3 btn3_text = tk.StringVar() btn3_label = tk.Label(app, text='Button Name 3', font=('bold', 14), pady=15) btn3_label.grid(row=1, column=0, sticky=tk.W) btn3_entry = tk.Entry(app, textvariable=btn3_text) btn3_entry.grid(row=1, column=1) # Button 4 btn4_text = tk.StringVar() btn4_label = tk.Label(app, text='Button Name 4', font=('bold', 14)) btn4_label.grid(row=1, column=2, sticky=tk.W) btn4_entry = tk.Entry(app, textvariable=btn4_text) btn4_entry.grid(row=1, column=3) # Button 5 btn5_text = tk.StringVar() btn5_label = tk.Label(app, text='Button Name 5', font=('bold', 14), pady=15) btn5_label.grid(row=2, column=0, sticky=tk.W) btn5_entry = tk.Entry(app, textvariable=btn5_text) btn5_entry.grid(row=2, column=1) # Button 6 btn6_text = tk.StringVar() btn6_label = tk.Label(app, text='Button Name 6', font=('bold', 14)) btn6_label.grid(row=2, column=2, sticky=tk.W) btn6_entry = tk.Entry(app, textvariable=btn6_text) btn6_entry.grid(row=2, column=3) # UI Buttons submit_btn = tk.Button(app, text='Submit Changes', width=12, command=submit_changes) submit_btn.grid(row=3, column=0, pady=20) app.title('Video Editor Configuration Utility') app.geometry('1000x600') # Start the utility app.mainloop()
submit_changes
models.py
from datetime import datetime from django.db import models from django.contrib.auth.models import User class PointManager(models.Manager): """Manager for Pressure Points.""" def recently_added(self, count=10): return self.order_by('-time_added')[:count] class City(models.Model): """City the Pressure Point belongs to.""" name = models.CharField(max_length=200) slug = models.SlugField() lat = models.FloatField() lon = models.FloatField() def __unicode__(self): return self.name class Point(models.Model): """Pressure Point model. The pressure points are the core concept of the app. They're small cases that the community shares, discusses, and eventually takes action upon in order to improve the quality of life. """ title = models.CharField(max_length=200) lat = models.FloatField() lon = models.FloatField() description = models.TextField() # descriptive address or directions on how to find the Point directions = models.TextField() time_added = models.DateTimeField() # simple voting mechanism (like/dislike) thumbsup = models.IntegerField() thumbsdown = models.IntegerField()
poster = models.ForeignKey(User) city = models.ForeignKey(City) # managers objects = PointManager() def __unicode__(self): return "%s x %s" % (self.lat, self.lon) class Photo(models.Model): """Photo objects illustrating Pressure Points.""" time_added = models.DateTimeField() thumbnail = models.ImageField(upload_to='upload/thumbnails', blank=True) original = models.ImageField(upload_to='upload/original') is_main = models.BooleanField() poster = models.ForeignKey(User) point = models.ForeignKey(Point, related_name='photos') def save(self, *args, **kwargs): if self.id is None: self.thumbnail = self.original super(Photo, self).save(*args, **kwargs) class FeatureManager(models.Manager): """Manager for Feature objects.""" def current(self): now = datetime.now() return self.filter(start_time__lt=now, end_time__gt=now) class Feature(models.Model): """Pressure Point features on the home page.""" start_time = models.DateTimeField() end_time = models.DateTimeField() point = models.ForeignKey(Point, related_name='features') objects = FeatureManager() class Resolution(models.Model): """Resolution objects describe how a Pressure Point was closed.""" description = models.TextField() time_resolved = models.DateTimeField() point = models.OneToOneField(Point, related_name='resolution')
# foreign keys
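# A minimal usage sketch, assuming the models above are migrated in a Django project;
# it shows what the two custom managers are for.
latest_points = Point.objects.recently_added(count=5)  # newest Pressure Points first
active_features = Feature.objects.current()            # Features running right now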
api_op_GetConfigurationSetEventDestinations.go
// Code generated by smithy-go-codegen DO NOT EDIT. package pinpointemail import ( "context" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/aws-sdk-go-v2/service/pinpointemail/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) // Retrieve a list of event destinations that are associated with a configuration // set. In Amazon Pinpoint, events include message sends, deliveries, opens, // clicks, bounces, and complaints. Event destinations are places that you can send // information about these events to. For example, you can send event data to // Amazon SNS to receive notifications when you receive bounces or complaints, or // you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for // long-term storage. func (c *Client) GetConfigurationSetEventDestinations(ctx context.Context, params *GetConfigurationSetEventDestinationsInput, optFns ...func(*Options)) (*GetConfigurationSetEventDestinationsOutput, error) { if params == nil { params = &GetConfigurationSetEventDestinationsInput{} } result, metadata, err := c.invokeOperation(ctx, "GetConfigurationSetEventDestinations", params, optFns, c.addOperationGetConfigurationSetEventDestinationsMiddlewares) if err != nil { return nil, err } out := result.(*GetConfigurationSetEventDestinationsOutput) out.ResultMetadata = metadata return out, nil } // A request to obtain information about the event destinations for a configuration // set. type GetConfigurationSetEventDestinationsInput struct { // The name of the configuration set that contains the event destination. // // This member is required. ConfigurationSetName *string } // Information about an event destination for a configuration set. type GetConfigurationSetEventDestinationsOutput struct { // An array that includes all of the events destinations that have been configured // for the configuration set. EventDestinations []types.EventDestination // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata } func (c *Client) addOperationGetConfigurationSetEventDestinationsMiddlewares(stack *middleware.Stack, options Options) (err error) { err = stack.Serialize.Add(&awsRestjson1_serializeOpGetConfigurationSetEventDestinations{}, middleware.After) if err != nil { return err } err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetConfigurationSetEventDestinations{}, middleware.After) if err != nil { return err } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { return err } if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { return err } if err = addRetryMiddlewares(stack, options); err != nil { return err } if err = addHTTPSignerV4Middleware(stack, options); err != nil { return err } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { return err } if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } if err = addOpGetConfigurationSetEventDestinationsValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetConfigurationSetEventDestinations(options.Region), middleware.Before); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } if err = addResponseErrorMiddleware(stack); err != nil { return err } if err = addRequestResponseLogging(stack, options); err != nil { return err } return nil } func newServiceMetadataMiddleware_opGetConfigurationSetEventDestinations(region string) *awsmiddleware.RegisterServiceMetadata
{ return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, SigningName: "ses", OperationName: "GetConfigurationSetEventDestinations", } }
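// A minimal caller sketch, assuming the standard aws-sdk-go-v2 `config` and `aws`
// helper packages are imported alongside this client; the configuration set name is
// illustrative only.
func exampleListEventDestinations(ctx context.Context) {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return // placeholder error handling
	}
	client := pinpointemail.NewFromConfig(cfg)
	out, err := client.GetConfigurationSetEventDestinations(ctx,
		&pinpointemail.GetConfigurationSetEventDestinationsInput{
			ConfigurationSetName: aws.String("my-configuration-set"),
		})
	if err != nil {
		return
	}
	for _, dest := range out.EventDestinations {
		_ = dest // inspect each configured event destination here
	}
}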
olgaussian_mpc.py
""" MPC with open-loop Gaussian policies """ from .controller import Controller from mjmpc.utils.control_utils import generate_noise, scale_ctrl import copy import numpy as np import scipy.special class OLGaussianMPC(Controller): def
(self, d_state, d_obs, d_action, action_lows, action_highs, horizon, init_cov, init_mean, base_action, num_particles, gamma, n_iters, step_size, filter_coeffs, set_sim_state_fn=None, rollout_fn=None, cov_type='diagonal', sample_mode='mean', batch_size=1, seed=0, use_zero_control_seq=False): """ Parameters __________ base_action : str Action to append at the end when shifting solution to next timestep 'random' : appends random action 'null' : appends zero action 'repeat' : repeats second to last action num_particles : int Number of particles sampled at every iteration """ super(OLGaussianMPC, self).__init__(d_state, d_obs, d_action, action_lows, action_highs, horizon, gamma, n_iters, set_sim_state_fn, rollout_fn, sample_mode, batch_size, seed) self.init_cov = np.array([init_cov] * self.d_action) self.init_mean = init_mean.copy() self.mean_action = init_mean self.base_action = base_action self.num_particles = num_particles self.cov_type = cov_type self.cov_action = np.diag(self.init_cov) self.step_size = step_size self.filter_coeffs = filter_coeffs self.use_zero_control_seq = use_zero_control_seq def _get_next_action(self, state, mode='mean'): if mode == 'mean': next_action = self.mean_action[0].copy() elif mode == 'sample': delta = generate_noise(self.cov_action, self.filter_coeffs, shape=(1, 1), base_seed=self.seed_val + 123*self.num_steps) next_action = self.mean_action[0].copy() + delta.reshape(self.d_action).copy() else: raise ValueError('Unidentified sampling mode in get_next_action') return next_action # def sample_actions(self): # delta = generate_noise(self.cov_action, self.filter_coeffs, # shape=(self.num_particles, self.horizon), # base_seed = self.seed_val + self.num_steps) # act_seq = self.mean_action[None, :, :] + delta # # act_seq = scale_ctrl(act_seq, self.action_lows, self.action_highs) # return np.array(act_seq) def sample_noise(self): delta = generate_noise(self.cov_action, self.filter_coeffs, shape=(self.num_particles, self.horizon), base_seed = self.seed_val + self.num_steps) # act_seq = scale_ctrl(act_seq, self.action_lows, self.action_highs) return delta def generate_rollouts(self, state): """ Samples a batch of actions, rolls out trajectories for each particle and returns the resulting observations, costs, actions Parameters ---------- state : dict or np.ndarray Initial state to set the simulation env to """ self._set_sim_state_fn(copy.deepcopy(state)) #set state of simulation # input('....') delta = self.sample_noise() #sample noise from covariance of current control distribution if self.use_zero_control_seq: delta[-1,:] = -1.0 * self.mean_action.copy() trajectories = self._rollout_fn(self.num_particles, self.horizon, self.mean_action, delta, mode="open_loop") return trajectories def _shift(self): """ Predict good parameters for the next time step by shifting the mean forward one step """ self.mean_action[:-1] = self.mean_action[1:] if self.base_action == 'random': self.mean_action[-1] = np.random.normal(0, self.init_cov, self.d_action) elif self.base_action == 'null': self.mean_action[-1] = np.zeros((self.d_action, )) elif self.base_action == 'repeat': self.mean_action[-1] = self.mean_action[-2] else: raise NotImplementedError("invalid option for base action during shift") def reset(self): self.num_steps = 0 self.mean_action = np.zeros(shape=(self.horizon, self.d_action)) self.cov_action = np.diag(self.init_cov) self.gamma_seq = np.cumprod([1.0] + [self.gamma] * (self.horizon - 1)).reshape(1, self.horizon) def _calc_val(self, cost_seq, act_seq): raise 
NotImplementedError("_calc_val not implemented")
__init__
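# A minimal, runnable illustration of the receding-horizon shift described in the
# constructor docstring (mirrors _shift with base_action='repeat'; sizes are illustrative).
import numpy as np
horizon, d_action = 5, 2
mean_action = np.arange(horizon * d_action, dtype=float).reshape(horizon, d_action)
mean_action[:-1] = mean_action[1:]  # move the planned action sequence forward one step
mean_action[-1] = mean_action[-2]   # refill the freed last slot: base_action == 'repeat'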
log-suppress_20180625192056.js
exports.init = function(console, env) { if (process.env.NODE_ENV === 'dev' || process.env.NODE_ENV === env) { // console.log('LogSuppress is suppressing all console output.'); if (typeof console.log == 'function') { console.log = function() { }; console.log('You should not see me');
} } };
alarmSettings.ts
import IStorageArea from "../../chrome/storageAreaInterface"; import { STORAGE_ALARM } from "../../common/const"; class
{ private storage: IStorageArea private isMonitoring: boolean constructor(storage: IStorageArea) { this.storage = storage } public async turnMonitoringOn(on: boolean) { this.isMonitoring = on await this._save() } public async isMonitoringOn() { if (this.isMonitoring === undefined) await this._load() return this.isMonitoring } private async _load() { const value = await this.storage.get(STORAGE_ALARM) if (value !== undefined) { this.isMonitoring = value.isMonitoring } else { this.isMonitoring = true } } private async _save() { await this.storage.set(STORAGE_ALARM, { isMonitoring: this.isMonitoring }) } } export default AlarmSettings
AlarmSettings
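// A minimal usage sketch, assuming a concrete IStorageArea implementation is passed in;
// it exercises the persistence round trip defined above.
async function exampleAlarmUsage(storage: IStorageArea): Promise<boolean> {
    const settings = new AlarmSettings(storage)
    await settings.turnMonitoringOn(false) // persists { isMonitoring: false }
    return settings.isMonitoringOn() // reads the value back
}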
dhcpoption.go
/* Copyright (c) 2015, Alcatel-Lucent Inc All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package vspk import "github.com/nuagenetworks/go-bambou/bambou" // DHCPOptionIdentity represents the Identity of the object var DHCPOptionIdentity = bambou.Identity{ Name: "dhcpoption", Category: "dhcpoptions", } // DHCPOptionsList represents a list of DHCPOptions type DHCPOptionsList []*DHCPOption // DHCPOptionsAncestor is the interface that an ancestor of a DHCPOption must implement. // An Ancestor is defined as an entity that has DHCPOption as a descendant. // An Ancestor can get a list of its child DHCPOptions, but not necessarily create one. type DHCPOptionsAncestor interface { DHCPOptions(*bambou.FetchingInfo) (DHCPOptionsList, *bambou.Error) } // DHCPOptionsParent is the interface that a parent of a DHCPOption must implement. // A Parent is defined as an entity that has DHCPOption as a child. // A Parent is an Ancestor which can create a DHCPOption. type DHCPOptionsParent interface { DHCPOptionsAncestor CreateDHCPOption(*DHCPOption) *bambou.Error } // DHCPOption represents the model of a dhcpoption type DHCPOption struct { ID string `json:"ID,omitempty"` ParentID string `json:"parentID,omitempty"` ParentType string `json:"parentType,omitempty"` Owner string `json:"owner,omitempty"` Value string `json:"value,omitempty"` LastUpdatedBy string `json:"lastUpdatedBy,omitempty"` LastUpdatedDate string `json:"lastUpdatedDate,omitempty"` ActualType int `json:"actualType,omitempty"` ActualValues []interface{} `json:"actualValues,omitempty"` Length string `json:"length,omitempty"` EmbeddedMetadata []interface{} `json:"embeddedMetadata,omitempty"` EntityScope string `json:"entityScope,omitempty"` CreationDate string `json:"creationDate,omitempty"` ExternalID string `json:"externalID,omitempty"` Type string `json:"type,omitempty"` } // NewDHCPOption returns a new *DHCPOption func
() *DHCPOption { return &DHCPOption{} } // Identity returns the Identity of the object. func (o *DHCPOption) Identity() bambou.Identity { return DHCPOptionIdentity } // Identifier returns the value of the object's unique identifier. func (o *DHCPOption) Identifier() string { return o.ID } // SetIdentifier sets the value of the object's unique identifier. func (o *DHCPOption) SetIdentifier(ID string) { o.ID = ID } // Fetch retrieves the DHCPOption from the server func (o *DHCPOption) Fetch() *bambou.Error { return bambou.CurrentSession().FetchEntity(o) } // Save saves the DHCPOption into the server func (o *DHCPOption) Save() *bambou.Error { return bambou.CurrentSession().SaveEntity(o) } // Delete deletes the DHCPOption from the server func (o *DHCPOption) Delete() *bambou.Error { return bambou.CurrentSession().DeleteEntity(o) } // Permissions retrieves the list of child Permissions of the DHCPOption func (o *DHCPOption) Permissions(info *bambou.FetchingInfo) (PermissionsList, *bambou.Error) { var list PermissionsList err := bambou.CurrentSession().FetchChildren(o, PermissionIdentity, &list, info) return list, err } // CreatePermission creates a new child Permission under the DHCPOption func (o *DHCPOption) CreatePermission(child *Permission) *bambou.Error { return bambou.CurrentSession().CreateChild(o, child) } // Metadatas retrieves the list of child Metadatas of the DHCPOption func (o *DHCPOption) Metadatas(info *bambou.FetchingInfo) (MetadatasList, *bambou.Error) { var list MetadatasList err := bambou.CurrentSession().FetchChildren(o, MetadataIdentity, &list, info) return list, err } // CreateMetadata creates a new child Metadata under the DHCPOption func (o *DHCPOption) CreateMetadata(child *Metadata) *bambou.Error { return bambou.CurrentSession().CreateChild(o, child) } // GlobalMetadatas retrieves the list of child GlobalMetadatas of the DHCPOption func (o *DHCPOption) GlobalMetadatas(info *bambou.FetchingInfo) (GlobalMetadatasList, *bambou.Error) { var list GlobalMetadatasList err := bambou.CurrentSession().FetchChildren(o, GlobalMetadataIdentity, &list, info) return list, err } // CreateGlobalMetadata creates a new child GlobalMetadata under the DHCPOption func (o *DHCPOption) CreateGlobalMetadata(child *GlobalMetadata) *bambou.Error { return bambou.CurrentSession().CreateChild(o, child) } // EventLogs retrieves the list of child EventLogs of the DHCPOption func (o *DHCPOption) EventLogs(info *bambou.FetchingInfo) (EventLogsList, *bambou.Error) { var list EventLogsList err := bambou.CurrentSession().FetchChildren(o, EventLogIdentity, &list, info) return list, err }
NewDHCPOption
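// A minimal usage sketch, assuming an authenticated bambou session has already been
// started (required by Save/Fetch/Delete) and that `parent` is any type implementing
// DHCPOptionsParent; the option type and value shown are illustrative only.
option := NewDHCPOption()
option.Type = "0f"        // illustrative DHCP option type
option.Value = "c0a80101" // illustrative option value
if err := parent.CreateDHCPOption(option); err != nil {
	// handle the *bambou.Error
}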
__init__.py
from .wordcloud import (WordCloud, STOPWORDS, random_color_func, get_single_color_func)
from .color_from_image import ImageColorGenerator __all__ = ['WordCloud', 'STOPWORDS', 'random_color_func', 'get_single_color_func', 'ImageColorGenerator'] from ._version import get_versions __version__ = get_versions()['version'] del get_versions
24.js
/** * Copyright IBM Corp. 2019, 2020 * * This source code is licensed under the Apache-2.0 license found in the * LICENSE file in the root directory of this source tree. * * Code generated by @carbon/icon-build-helpers. DO NOT EDIT. */ 'use strict'; function
(ex) { return (ex && (typeof ex === 'object') && 'default' in ex) ? ex['default'] : ex; } var Icon = require('../Icon-e8f71a9c.js'); require('@carbon/icon-helpers'); require('prop-types'); var React = _interopDefault(require('react')); var _ref2 = /*#__PURE__*/ /*#__PURE__*/ React.createElement("path", { d: "M22.5,23A4.5,4.5,0,1,1,27,18.5,4.505,4.505,0,0,1,22.5,23Zm0-7A2.5,2.5,0,1,0,25,18.5,2.5026,2.5026,0,0,0,22.5,16Z" }); var _ref3 = /*#__PURE__*/ /*#__PURE__*/ React.createElement("path", { d: "M28 8H26V3H19V8H17a2.002 2.002 0 00-2 2V28a2.0023 2.0023 0 002 2H28a2.0027 2.0027 0 002-2V10A2.0023 2.0023 0 0028 8zM21 5h3V8H21zM17 28V10H28l.002 18zM2 14H5V17H2zM7 9H10V12H7zM2 9H5V12H2zM12 4H15V7H12zM7 4H10V7H7zM2 4H5V7H2z" }); var SprayPaint24 = /*#__PURE__*/React.forwardRef(function SprayPaint24(_ref, ref) { var children = _ref.children, rest = Icon._objectWithoutProperties(_ref, ["children"]); return /*#__PURE__*/React.createElement(Icon.Icon, Icon._extends({ width: 24, height: 24, viewBox: "0 0 32 32", xmlns: "http://www.w3.org/2000/svg", fill: "currentColor", ref: ref }, rest), _ref2, _ref3, children); }); module.exports = SprayPaint24;
_interopDefault
BOJ14405.py
s=input();f=True try: while s!="": if s[0]=="p": t=s[:2] if t=="pi": s=s[2:] else:
elif s[0]=="k": t=s[:2] if t=="ka": s=s[2:] else: f=False; break elif s[0]=="c": t=s[:3] if t=="chu": s=s[3:] else: f=False; break else: f=False; break except: f=False if f: print("YES") else: print("NO")
f=False; break
getOKResponseToolsetsXcode.js
/* * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See License.txt in the project root for * license information. * * Code generated by Microsoft (R) AutoRest Code Generator. * Changes may cause incorrect behavior and will be lost if the code is * regenerated. */ 'use strict'; /** * Build configuration when Xcode is part of the build steps * */ class
{ /** * Create a GetOKResponseToolsetsXcode. * @property {string} [projectOrWorkspacePath] Xcode project/workspace path * @property {string} [podfilePath] Path to CococaPods file, if present * @property {string} [cartfilePath] Path to Carthage file, if present * @property {string} [provisioningProfileEncoded] * @property {string} [certificateEncoded] * @property {string} [provisioningProfileFileId] * @property {string} [certificateFileId] * @property {string} [provisioningProfileUploadId] * @property {array} [appExtensionProvisioningProfileFiles] * @property {string} [certificateUploadId] * @property {string} [certificatePassword] * @property {string} [scheme] * @property {string} [xcodeVersion] Xcode version used to build. Available * versions can be found in "/xcode_versions" API. Default is latest stable * version, at the time when the configuration is set. * @property {string} [provisioningProfileFilename] * @property {string} [certificateFilename] * @property {string} [teamId] * @property {boolean} [automaticSigning] * @property {string} [xcodeProjectSha] The selected pbxproject hash to the * repositroy * @property {string} [archiveConfiguration] The build configuration of the * target to archive * @property {string} [targetToArchive] The target id of the selected scheme * to archive * @property {boolean} [forceLegacyBuildSystem] Setting this to true forces * the build to use Xcode legacy build system. Otherwise, the setting from * workspace settings is used. * By default new build system is used if workspace setting is not committed * to the repository. Only used for iOS React Native app, with Xcode 10. */ constructor() { } /** * Defines the metadata of GetOKResponseToolsetsXcode * * @returns {object} metadata of GetOKResponseToolsetsXcode * */ mapper() { return { required: false, serializedName: 'GetOKResponse_toolsets_xcode', type: { name: 'Composite', className: 'GetOKResponseToolsetsXcode', modelProperties: { projectOrWorkspacePath: { required: false, serializedName: 'projectOrWorkspacePath', type: { name: 'String' } }, podfilePath: { required: false, serializedName: 'podfilePath', type: { name: 'String' } }, cartfilePath: { required: false, serializedName: 'cartfilePath', type: { name: 'String' } }, provisioningProfileEncoded: { required: false, serializedName: 'provisioningProfileEncoded', type: { name: 'String' } }, certificateEncoded: { required: false, serializedName: 'certificateEncoded', type: { name: 'String' } }, provisioningProfileFileId: { required: false, serializedName: 'provisioningProfileFileId', type: { name: 'String' } }, certificateFileId: { required: false, serializedName: 'certificateFileId', type: { name: 'String' } }, provisioningProfileUploadId: { required: false, serializedName: 'provisioningProfileUploadId', type: { name: 'String' } }, appExtensionProvisioningProfileFiles: { required: false, serializedName: 'appExtensionProvisioningProfileFiles', type: { name: 'Sequence', element: { required: false, serializedName: 'GetOKResponseToolsetsXcodeAppExtensionProvisioningProfileFilesItemElementType', type: { name: 'Composite', className: 'GetOKResponseToolsetsXcodeAppExtensionProvisioningProfileFilesItem' } } } }, certificateUploadId: { required: false, serializedName: 'certificateUploadId', type: { name: 'String' } }, certificatePassword: { required: false, serializedName: 'certificatePassword', type: { name: 'String' } }, scheme: { required: false, serializedName: 'scheme', type: { name: 'String' } }, xcodeVersion: { required: false, serializedName: 
'xcodeVersion', type: { name: 'String' } }, provisioningProfileFilename: { required: false, serializedName: 'provisioningProfileFilename', type: { name: 'String' } }, certificateFilename: { required: false, serializedName: 'certificateFilename', type: { name: 'String' } }, teamId: { required: false, serializedName: 'teamId', type: { name: 'String' } }, automaticSigning: { required: false, serializedName: 'automaticSigning', type: { name: 'Boolean' } }, xcodeProjectSha: { required: false, serializedName: 'xcodeProjectSha', type: { name: 'String' } }, archiveConfiguration: { required: false, serializedName: 'archiveConfiguration', type: { name: 'String' } }, targetToArchive: { required: false, serializedName: 'targetToArchive', type: { name: 'String' } }, forceLegacyBuildSystem: { required: false, serializedName: 'forceLegacyBuildSystem', type: { name: 'Boolean' } } } } }; } } module.exports = GetOKResponseToolsetsXcode;
GetOKResponseToolsetsXcode
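// Editor's note: the lines below are a minimal, hypothetical usage sketch and are not part of
// the generated model file. They assume an ms-rest based ServiceClient instance `client` that
// exposes this model through `client.models`, and a `parsedResponse` object returned by an HTTP
// call; all of these names are illustrative assumptions, not documented API of this file.
const resultMapper = new client.models['GetOKResponseToolsetsXcode']().mapper();
const xcodeToolset = client.deserialize(resultMapper, parsedResponse, 'parsedResponse');
console.log(xcodeToolset.projectOrWorkspacePath, xcodeToolset.xcodeVersion);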
blk_monitor.go
/* Package repository implements repository for handling fast and efficient access to data required by the resolvers of the API server. Internally it utilizes RPC to access Opera/Lachesis full node for blockchain interaction. Mongo database for fast, robust and scalable off-chain data storage, especially for aggregated and pre-calculated data mining results. BigCache for in-memory object storage to speed up loading of frequently accessed entities. */ package repository import ( "context" "fantom-api-graphql/internal/logger" "fantom-api-graphql/internal/types" ftm "github.com/ethereum/go-ethereum/rpc" "sync" "time" ) // monBlocksBufferCapacity is the number of new blocks kept in the block processing channel. const monBlocksBufferCapacity = 50000 // blockMonitor represents a subscription processor capturing new blockchain blocks. type blockMonitor struct { service txChan chan *eventTransaction blkChan chan types.Block procChan chan types.Block sigProcStop chan bool reScan chan bool con *ftm.Client sub *ftm.ClientSubscription // event broadcast channels onBlock chan *types.Block onTransaction chan *types.Transaction } // NewBlockMonitor creates a new block monitor instance. func NewBlockMonitor(con *ftm.Client, buffer chan *eventTransaction, rescan chan bool, repo Repository, log logger.Logger, wg *sync.WaitGroup) *blockMonitor { // create new blockScanner instance return &blockMonitor{ service: newService("block monitor", repo, log, wg), txChan: buffer, reScan: rescan, con: con, sigProcStop: make(chan bool, 1), blkChan: make(chan types.Block, monBlocksBufferCapacity), procChan: make(chan types.Block, monBlocksBufferCapacity), } } // run starts monitoring for new transaction func (bm *blockMonitor) run() { // start go routine for processing bm.wg.Add(1) go bm.process() // start go routine for subscription reader bm.wg.Add(1) go bm.monitor() } // subscribe opens a subscription on the connected Opera/Lachesis full node. func (bm *blockMonitor) subscribe() error { // open subscription sub, err := bm.con.Subscribe(context.Background(), "eth", bm.blkChan, "newHeads") if err != nil { bm.log.Error("can not subscribe to blockchain") bm.log.Error(err) return err } // keep the subscription bm.sub = sub return nil } // monitor consumes new blocks from the block channel and route them to target functions. func (bm *blockMonitor) monitor() { // inform about the monitor bm.log.Notice("block monitor is running") // don't forget to sign off after we are done defer func() { // make sure to spread the word bm.sigProcStop <- true // unsubscribe bm.log.Notice("block monitor unsubscribed") if bm.sub != nil { bm.sub.Unsubscribe() bm.sub = nil } // log finish bm.log.Notice("block monitor is closed") // signal to wait group we are done bm.wg.Done() }() // open subscription if err := bm.subscribe(); err != nil { bm.log.Criticalf("block monitor subscription failed; %s", err.Error()) return } // loop here for { select { case <-bm.sigStop: return case err, ok := <-bm.sub.Err(): // do we have a working channel? 
if !ok { bm.log.Notice("block monitor subscription has been closed") return } // log issue bm.log.Errorf("block monitor subscription error; %s", err.Error()) // signal orchestrator to schedule re-scan and restart subscription bm.reScan <- true return case blk := <-bm.blkChan: // log the action bm.log.Debugf("new block #%d arrived", uint64(blk.Number)) // extract full block information block, err := bm.repo.BlockByNumber(&blk.Number) if err != nil { bm.log.Errorf("can not process block; %s", err.Error()) continue } // push for processing bm.procChan <- *block // notify event
// add to the ring cache bm.repo.CacheBlock(block) } } } // process pulls blocks from processing queue and act on it as needed. func (bm *blockMonitor) process() { // inform about the monitor bm.log.Notice("block processor is running") // don't forget to sign off after we are done defer func() { // log finish bm.log.Notice("block processor is closed") // signal to wait group we are done bm.wg.Done() }() for { select { case <-bm.sigProcStop: return case block, ok := <-bm.procChan: // if the channel is closed, no more data will arrive here if !ok { return } // any transactions in the block? process them if block.Txs != nil && len(block.Txs) > 0 { bm.handle(&block) } } } } // handle pulls transactions from the incoming block to be processed in the trx dispatch queue. func (bm *blockMonitor) handle(block *types.Block) { // log action bm.log.Debugf("block #%d has %d transactions to process", uint64(block.Number), len(block.Txs)) // loop transaction hashes for i, hash := range block.Txs { // log action bm.log.Debugf("processing transaction #%d of block #%d", i, uint64(block.Number)) // get transaction trx, err := bm.repo.Transaction(hash) if err != nil { bm.log.Errorf("error getting transaction detail; %s", err.Error()) continue } // update time stamp using the block data trx.TimeStamp = time.Unix(int64(block.TimeStamp), 0) // prep sending struct and push it to the queue event := eventTransaction{block: block, trx: trx} bm.txChan <- &event // notify new transaction bm.onTransaction <- trx } // log action bm.log.Debugf("block #%d processed", uint64(block.Number)) }
bm.onBlock <- block
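// Editor's note: a small, hypothetical wiring sketch, not part of the original blk_monitor.go.
// It assumes the caller (inside this package) already holds an opened RPC client, a Repository
// implementation, a logger, and a shared wait group; the function and variable names below are
// illustrative only.
func startBlockMonitorSketch(con *ftm.Client, repo Repository, log logger.Logger, wg *sync.WaitGroup) (chan *eventTransaction, chan bool) {
	// buffered channel carrying transactions extracted from incoming blocks
	txBuffer := make(chan *eventTransaction, 10000)
	// channel used to ask the orchestrator for a block re-scan after a subscription failure
	reScan := make(chan bool, 1)
	// build the monitor and spin up its monitor/process goroutines
	bm := NewBlockMonitor(con, txBuffer, reScan, repo, log, wg)
	bm.run()
	return txBuffer, reScan
}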
telemetry.rs
// The compiler cannot figure out that the `use crate::*` // import is actually used, and putting the allow attribute on that import in // particular appears to do nothing... T_T #![allow(unused_imports)] mod log_stream; mod tcp_errors; use crate::*; use std::io::Read; struct Fixture { client: client::Client, metrics: client::Client, proxy: proxy::Listening, _profile: controller::ProfileSender, dst_tx: Option<controller::DstSender>, labels: metrics::Labels, tcp_src_labels: metrics::Labels, tcp_dst_labels: metrics::Labels, } struct TcpFixture { client: tcp::TcpClient, metrics: client::Client, proxy: proxy::Listening, profile: controller::ProfileSender, dst: controller::DstSender, src_labels: metrics::Labels, dst_labels: metrics::Labels, } impl Fixture { async fn inbound() -> Self { info!("running test server"); Fixture::inbound_with_server(server::new().route("/", "hello").run().await).await } async fn outbound() -> Self { info!("running test server"); Fixture::outbound_with_server(server::new().route("/", "hello").run().await).await } async fn inbound_with_server(srv: server::Listening) -> Self { let ctrl = controller::new(); let orig_dst = srv.addr; let _profile = ctrl.profile_tx_default(orig_dst, "tele.test.svc.cluster.local"); let proxy = proxy::new() .controller(ctrl.run().await) .inbound(srv) .run() .await; let metrics = client::http1(proxy.admin, "localhost"); let client = client::new(proxy.inbound, "tele.test.svc.cluster.local"); let tcp_dst_labels = metrics::labels().label("direction", "inbound"); let tcp_src_labels = tcp_dst_labels.clone().label("target_addr", orig_dst); let labels = tcp_dst_labels .clone() .label("authority", "tele.test.svc.cluster.local"); let tcp_src_labels = tcp_src_labels.label("peer", "src"); let tcp_dst_labels = tcp_dst_labels.label("peer", "dst"); Fixture { client, metrics, proxy, _profile, dst_tx: None, labels, tcp_src_labels, tcp_dst_labels, } } async fn outbound_with_server(srv: server::Listening) -> Self { let ctrl = controller::new(); let orig_dst = srv.addr; let _profile = ctrl.profile_tx_default(orig_dst, "tele.test.svc.cluster.local"); let authority = format!("tele.test.svc.cluster.local:{}", orig_dst.port()); let dest = ctrl.destination_tx(authority.clone()); dest.send_addr(srv.addr); let proxy = proxy::new() .controller(ctrl.run().await) .outbound(srv) .run() .await; let metrics = client::http1(proxy.admin, "localhost"); let client = client::new(proxy.outbound, "tele.test.svc.cluster.local"); let tcp_labels = metrics::labels() .label("direction", "outbound") .label("target_addr", orig_dst); let labels = tcp_labels.clone().label("authority", authority); let tcp_src_labels = tcp_labels.clone().label("peer", "src"); let tcp_dst_labels = tcp_labels.label("peer", "dst"); Fixture { client, metrics, proxy, _profile, dst_tx: Some(dest), labels, tcp_src_labels, tcp_dst_labels, } } } impl TcpFixture { const HELLO_MSG: &'static str = "custom tcp hello\n"; const BYE_MSG: &'static str = "custom tcp bye"; async fn server() -> server::Listening { server::tcp() .accept(move |read| { assert_eq!(read, Self::HELLO_MSG.as_bytes()); TcpFixture::BYE_MSG }) .accept(move |read| { assert_eq!(read, Self::HELLO_MSG.as_bytes()); TcpFixture::BYE_MSG }) .run() .await } async fn inbound() -> Self { let srv = TcpFixture::server().await; let ctrl = controller::new(); let orig_dst = srv.addr; let profile = ctrl.profile_tx_default(orig_dst, &orig_dst.to_string()); let dst = ctrl.destination_tx(orig_dst.to_string()); dst.send_addr(orig_dst); let proxy = proxy::new() 
.controller(ctrl.run().await) .inbound(srv) .run() .await; let client = client::tcp(proxy.inbound); let metrics = client::http1(proxy.admin, "localhost"); let src_labels = metrics::labels() .label("direction", "inbound") .label("peer", "src") .label("target_addr", orig_dst) .label("srv_kind", "default") .label("srv_name", "all-unauthenticated"); let dst_labels = metrics::labels() .label("direction", "inbound") .label("peer", "dst") .label("tls", "no_identity") .label("no_tls_reason", "loopback"); TcpFixture { client, metrics, proxy, profile, dst, src_labels, dst_labels, } } async fn outbound() -> Self { let srv = TcpFixture::server().await; let ctrl = controller::new(); let orig_dst = srv.addr; let profile = ctrl.profile_tx_default(orig_dst, &orig_dst.to_string()); let dst = ctrl.destination_tx(orig_dst.to_string()); dst.send_addr(orig_dst); let proxy = proxy::new() .controller(ctrl.run().await) .outbound(srv) .run() .await; let client = client::tcp(proxy.outbound); let metrics = client::http1(proxy.admin, "localhost"); let src_labels = metrics::labels() .label("direction", "outbound") .label("peer", "src") .label("tls", "no_identity") .label("no_tls_reason", "loopback") .label("target_addr", orig_dst); let dst_labels = metrics::labels() .label("direction", "outbound") .label("peer", "dst"); TcpFixture { client, metrics, proxy, profile, dst, src_labels, dst_labels, } } } #[tokio::test] async fn admin_request_count() { let _trace = trace_init(); let fixture = Fixture::inbound().await; let metrics = fixture.metrics; let metric = metrics::metric("request_total") .label("direction", "inbound") .label("target_addr", metrics.target_addr()) .value(1usize); // We can't assert that the metric is not present, since `GET /metrics` // will bump the request count, lol metric.assert_in(&metrics).await; } #[tokio::test] async fn admin_transport_metrics() { let _trace = trace_init(); let fixture = Fixture::inbound().await; let metrics = fixture.metrics; let labels = metrics::labels() .label("direction", "inbound") .label("target_addr", metrics.target_addr()) .label("peer", "src"); let mut open_total = labels.metric("tcp_open_total").value(1usize); open_total.assert_in(&metrics).await; assert!( open_total .clone() .label("peer", "dst") .is_not_in(metrics.get("/metrics").await), "peer=\"dst\" metrics don't make sense for the admin server" ); let mut close_total = labels.metric("tcp_close_total"); assert!( close_total.is_not_in(metrics.get("/metrics").await), "client connection hasn't been closed yet" ); // Close the connection and reconnect. 
let metrics = metrics.shutdown().await.reconnect(); close_total.set_value(1).assert_in(&metrics).await; assert!( close_total .label("peer", "dst") .is_not_in(metrics.get("/metrics").await), "peer=\"dst\" metrics don't make sense for the admin server" ); open_total.set_value(2usize).assert_in(&metrics).await; let open_gauge = labels.metric("tcp_open_connections"); open_gauge.clone().value(1usize).assert_in(&metrics).await; assert!( open_gauge .label("peer", "dst") .is_not_in(metrics.get("/metrics").await), "peer=\"dst\" metrics don't make sense for the admin server" ); } #[tokio::test] async fn metrics_endpoint_inbound_request_count() { test_http_count("request_total", Fixture::inbound()).await; } #[tokio::test] async fn metrics_endpoint_outbound_request_count() { test_http_count("request_total", Fixture::outbound()).await } #[tokio::test] async fn metrics_endpoint_inbound_response_count() { test_http_count("response_total", Fixture::inbound()).await; } #[tokio::test] async fn metrics_endpoint_outbound_response_count() { test_http_count("response_total", Fixture::outbound()).await } async fn test_http_count(metric: &str, fixture: impl Future<Output = Fixture>) { let _trace = trace_init(); let Fixture { client, metrics, proxy: _proxy, _profile, dst_tx: _dst_tx, labels, .. } = fixture.await; let metric = labels.metric(metric); assert!(metric.is_not_in(metrics.get("/metrics").await)); info!("client.get(/)"); assert_eq!(client.get("/").await, "hello"); // after seeing a request, the request count should be 1. metric.value(1u64).assert_in(&metrics).await; } mod response_classification { use super::Fixture; use crate::*; use tracing::info; const REQ_STATUS_HEADER: &str = "x-test-status-requested"; const REQ_GRPC_STATUS_HEADER: &str = "x-test-grpc-status-requested"; const STATUSES: [http::StatusCode; 6] = [ http::StatusCode::OK, http::StatusCode::NOT_MODIFIED, http::StatusCode::BAD_REQUEST, http::StatusCode::IM_A_TEAPOT, http::StatusCode::GATEWAY_TIMEOUT, http::StatusCode::INTERNAL_SERVER_ERROR, ]; async fn make_test_server() -> server::Listening { fn parse_header(headers: &http::HeaderMap, which: &str) -> Option<http::StatusCode> { headers.get(which).map(|val| { val.to_str() .expect("requested status should be ascii") .parse::<http::StatusCode>() .expect("requested status should be numbers") }) } info!("running test server"); server::new() .route_fn("/", move |req| { let headers = req.headers(); let status = parse_header(headers, REQ_STATUS_HEADER).unwrap_or(http::StatusCode::OK); let grpc_status = parse_header(headers, REQ_GRPC_STATUS_HEADER); let mut rsp = if let Some(_grpc_status) = grpc_status { // TODO: tests for grpc statuses unreachable!("not called in test") } else { Response::new("".into()) }; *rsp.status_mut() = status; rsp }) .run() .await } async fn test_http(fixture: impl Future<Output = Fixture>) { let _trace = trace_init(); let Fixture { client, metrics, proxy: _proxy, _profile, dst_tx: _dst_tx, labels, .. } = fixture.await; for (i, status) in STATUSES.iter().enumerate() { let request = client .request( client .request_builder("/") .header(REQ_STATUS_HEADER, status.as_str()) .method("GET"), ) .await .unwrap(); assert_eq!(&request.status(), status); for status in &STATUSES[0..i] { // assert that the current status code is incremented, *and* that // all previous requests are *not* incremented. 
labels .metric("response_total") .label("status_code", status.as_u16()) .label( "classification", if status.is_server_error() { "failure" } else { "success" }, ) .value(1u64) .assert_in(&metrics) .await; } } } #[tokio::test] async fn inbound_http() { let fixture = async { Fixture::inbound_with_server(make_test_server().await).await }; test_http(fixture).await } #[tokio::test] async fn outbound_http() { let fixture = async { Fixture::outbound_with_server(make_test_server().await).await }; test_http(fixture).await } } async fn test_response_latency<F>(mk_fixture: impl Fn(server::Listening) -> F) where F: Future<Output = Fixture>, { let _trace = trace_init(); info!("running test server"); let srv = server::new() .route_with_latency("/hey", "hello", Duration::from_millis(500)) .route_with_latency("/hi", "good morning", Duration::from_millis(40)) .run() .await; let Fixture { client, metrics, proxy: _proxy, _profile, dst_tx: _dst_tx, labels, .. } = mk_fixture(srv).await; info!("client.get(/hey)"); assert_eq!(client.get("/hey").await, "hello"); // assert the >=1000ms bucket is incremented by our request with 500ms // extra latency. let labels = labels.label("status_code", 200); let mut bucket_1000 = labels .clone() .metric("response_latency_ms_bucket") .label("le", 1000) .value(1u64); let mut bucket_50 = labels.metric("response_latency_ms_bucket").label("le", 50); let mut count = labels.metric("response_latency_ms_count").value(1u64); bucket_1000.assert_in(&metrics).await; // the histogram's count should be 1. count.assert_in(&metrics).await; // TODO: we're not going to make any assertions about the // response_latency_ms_sum stat, since its granularity depends on the actual // observed latencies, which may vary a bit. we could make more reliable // assertions about that stat if we were using a mock timer, though, as the // observed latency values would be predictable. info!("client.get(/hi)"); assert_eq!(client.get("/hi").await, "good morning"); // request with 40ms extra latency should fall into the 50ms bucket. bucket_50.set_value(1u64).assert_in(&metrics).await; // 1000ms bucket should be incremented as well, since it counts *all* // observations less than or equal to 1000ms, even if they also increment // other buckets. bucket_1000.set_value(2u64).assert_in(&metrics).await; // the histogram's total count should be 2. count.set_value(2u64).assert_in(&metrics).await; info!("client.get(/hi)"); assert_eq!(client.get("/hi").await, "good morning"); // request with 40ms extra latency should fall into the 50ms bucket. bucket_50.set_value(2u64).assert_in(&metrics).await; // 1000ms bucket should be incremented as well. bucket_1000.set_value(3).assert_in(&metrics).await; // the histogram's total count should be 3. count.set_value(3).assert_in(&metrics).await; info!("client.get(/hey)"); assert_eq!(client.get("/hey").await, "hello"); // 50ms bucket should be un-changed by the request with 500ms latency. bucket_50.assert_in(&metrics).await; // 1000ms bucket should be incremented. bucket_1000.set_value(4).assert_in(&metrics).await; // the histogram's total count should be 4. count.set_value(4).assert_in(&metrics).await; } // Ignore this test on CI, because our method of adding latency to requests // (calling `thread::sleep`) is likely to be flakey on Travis. // Eventually, we can add some kind of mock timer system for simulating latency // more reliably, and re-enable this test. 
#[tokio::test] #[cfg_attr(not(feature = "flakey-in-ci"), ignore)] async fn inbound_response_latency() { test_response_latency(Fixture::inbound_with_server).await } // Ignore this test on CI, because our method of adding latency to requests // (calling `thread::sleep`) is likely to be flakey on Travis. // Eventually, we can add some kind of mock timer system for simulating latency // more reliably, and re-enable this test. #[tokio::test] #[cfg_attr(not(feature = "flakey-in-ci"), ignore)] async fn outbound_response_latency() { test_response_latency(Fixture::outbound_with_server).await } // Tests for destination labels provided by control plane service discovery. mod outbound_dst_labels { use super::Fixture; use crate::*; use controller::DstSender; async fn fixture(dest: &str) -> (Fixture, SocketAddr) { info!("running test server"); let srv = server::new().route("/", "hello").run().await; let addr = srv.addr; let ctrl = controller::new(); let _profile = ctrl.profile_tx_default(addr, dest); let dest_and_port = format!("{}:{}", dest, addr.port()); let dst_tx = ctrl.destination_tx(dest_and_port.clone()); let proxy = proxy::new() .controller(ctrl.run().await) .outbound(srv) .run() .await; let metrics = client::http1(proxy.admin, "localhost"); let client = client::new(proxy.outbound, dest); let tcp_labels = metrics::labels() .label("direction", "outbound") .label("target_addr", addr); let labels = tcp_labels.clone().label("authority", dest_and_port); let f = Fixture { client, metrics, proxy, _profile, labels, tcp_src_labels: tcp_labels.clone(), tcp_dst_labels: tcp_labels, dst_tx: Some(dst_tx), }; (f, addr) } #[tokio::test] async fn multiple_addr_labels() { let _trace = trace_init(); let ( Fixture { client, metrics, proxy: _proxy, _profile, dst_tx, labels, .. }, addr, ) = fixture("labeled.test.svc.cluster.local").await; let dst_tx = dst_tx.unwrap(); { let mut labels = HashMap::new(); labels.insert("addr_label1".to_owned(), "foo".to_owned()); labels.insert("addr_label2".to_owned(), "bar".to_owned()); dst_tx.send_labeled(addr, labels, HashMap::new()); } info!("client.get(/)"); assert_eq!(client.get("/").await, "hello"); let labels = labels .label("dst_addr_label1", "foo") .label("dst_addr_label2", "bar"); for &metric in &[ "request_total", "response_total", "response_latency_ms_count", ] { labels.metric(metric).assert_in(&metrics).await; } } #[tokio::test] async fn multiple_addrset_labels() { let _trace = trace_init(); let ( Fixture { client, metrics, proxy: _proxy, _profile, dst_tx, labels, .. }, addr, ) = fixture("labeled.test.svc.cluster.local").await; let dst_tx = dst_tx.unwrap(); { let mut labels = HashMap::new(); labels.insert("set_label1".to_owned(), "foo".to_owned()); labels.insert("set_label2".to_owned(), "bar".to_owned()); dst_tx.send_labeled(addr, HashMap::new(), labels); } info!("client.get(/)"); assert_eq!(client.get("/").await, "hello"); let labels = labels .label("dst_set_label1", "foo") .label("dst_set_label2", "bar"); for &metric in &[ "request_total", "response_total", "response_latency_ms_count", ] { labels.metric(metric).assert_in(&metrics).await; } } #[tokio::test] async fn labeled_addr_and_addrset() { let _trace = trace_init(); let ( Fixture { client, metrics, proxy: _proxy, _profile, dst_tx, labels, .. 
}, addr, ) = fixture("labeled.test.svc.cluster.local").await; let dst_tx = dst_tx.unwrap(); { let mut alabels = HashMap::new(); alabels.insert("addr_label".to_owned(), "foo".to_owned()); let mut slabels = HashMap::new(); slabels.insert("set_label".to_owned(), "bar".to_owned()); dst_tx.send_labeled(addr, alabels, slabels); } info!("client.get(/)"); assert_eq!(client.get("/").await, "hello"); let labels = labels .label("dst_addr_label", "foo") .label("dst_set_label", "bar"); for &metric in &[ "request_total", "response_total", "response_latency_ms_count", ] { labels.metric(metric).assert_in(&metrics).await; } } // Ignore this test on CI, as it may fail due to the reduced concurrency // on CI containers causing the proxy to see both label updates from // the mock controller before the first request has finished. // See linkerd/linkerd2#751 #[tokio::test] #[cfg_attr(not(feature = "flakey-in-ci"), ignore)] async fn controller_updates_addr_labels() { let _trace = trace_init(); info!("running test server"); let ( Fixture { client, metrics, proxy: _proxy, _profile, dst_tx, labels, .. }, addr, ) = fixture("labeled.test.svc.cluster.local").await; let dst_tx = dst_tx.unwrap(); { let mut alabels = HashMap::new(); alabels.insert("addr_label".to_owned(), "foo".to_owned()); let mut slabels = HashMap::new(); slabels.insert("set_label".to_owned(), "unchanged".to_owned()); dst_tx.send_labeled(addr, alabels, slabels); } let labels1 = labels .clone() .label("dst_addr_label", "foo") .label("dst_set_label", "unchanged"); info!("client.get(/)"); assert_eq!(client.get("/").await, "hello"); // the first request should be labeled with `dst_addr_label="foo"` for &metric in &[ "request_total", "response_total", "response_latency_ms_count", ] { labels1.metric(metric).value(1u64).assert_in(&metrics).await; } { let mut alabels = HashMap::new(); alabels.insert("addr_label".to_owned(), "bar".to_owned()); let mut slabels = HashMap::new(); slabels.insert("set_label".to_owned(), "unchanged".to_owned()); dst_tx.send_labeled(addr, alabels, slabels); } let labels2 = labels .label("dst_addr_label", "bar") .label("dst_set_label", "unchanged"); info!("client.get(/)"); assert_eq!(client.get("/").await, "hello"); // the second request should increment stats labeled with `dst_addr_label="bar"` // the first request should be labeled with `dst_addr_label="foo"` for &metric in &[ "request_total", "response_total", "response_latency_ms_count", ] { labels1.metric(metric).value(1u64).assert_in(&metrics).await; } // stats recorded from the first request should still be present. // the first request should be labeled with `dst_addr_label="foo"` for &metric in &[ "request_total", "response_total", "response_latency_ms_count", ] { labels2.metric(metric).value(1u64).assert_in(&metrics).await; } } // FIXME(ver) this test was marked flakey, but now it consistently fails. #[ignore] #[tokio::test] async fn controller_updates_set_labels() { let _trace = trace_init(); info!("running test server"); let ( Fixture { client, metrics, proxy: _proxy, _profile, dst_tx, labels, .. 
}, addr, ) = fixture("labeled.test.svc.cluster.local").await; let dst_tx = dst_tx.unwrap(); { let alabels = HashMap::new(); let mut slabels = HashMap::new(); slabels.insert("set_label".to_owned(), "foo".to_owned()); dst_tx.send_labeled(addr, alabels, slabels); } let labels1 = labels.clone().label("dst_set_label", "foo"); info!("client.get(/)"); assert_eq!(client.get("/").await, "hello"); // the first request should be labeled with `dst_addr_label="foo" for &metric in &[ "request_total", "response_total", "response_latency_ms_count", ] { labels1.metric(metric).value(1u64).assert_in(&client).await; } { let alabels = HashMap::new(); let mut slabels = HashMap::new(); slabels.insert("set_label".to_owned(), "bar".to_owned()); dst_tx.send_labeled(addr, alabels, slabels); } let labels2 = labels.label("dst_set_label", "bar"); info!("client.get(/)"); assert_eq!(client.get("/").await, "hello"); // the second request should increment stats labeled with `dst_addr_label="bar"` for &metric in &[ "request_total", "response_total", "response_latency_ms_count", ] { labels2.metric(metric).value(1u64).assert_in(&metrics).await; } // stats recorded from the first request should still be present. for &metric in &[ "request_total", "response_total", "response_latency_ms_count", ] { labels1.metric(metric).value(1u64).assert_in(&metrics).await; } } } #[tokio::test] async fn metrics_have_no_double_commas() { // Test for regressions to linkerd/linkerd2#600. let _trace = trace_init(); info!("running test server"); let inbound_srv = server::new().route("/hey", "hello").run().await; let outbound_srv = server::new().route("/hey", "hello").run().await; let ctrl = controller::new(); let _profile_in = ctrl.profile_tx_default("tele.test.svc.cluster.local", "tele.test.svc.cluster.local"); let _profile_out = ctrl.profile_tx_default(outbound_srv.addr, "tele.test.svc.cluster.local"); let out_dest = ctrl.destination_tx(format!( "tele.test.svc.cluster.local:{}", outbound_srv.addr.port() )); out_dest.send_addr(outbound_srv.addr); let in_dest = ctrl.destination_tx(format!( "tele.test.svc.cluster.local:{}", inbound_srv.addr.port() )); in_dest.send_addr(inbound_srv.addr); let proxy = proxy::new() .controller(ctrl.run().await) .inbound(inbound_srv) .outbound(outbound_srv) .run() .await; let client = client::new(proxy.inbound, "tele.test.svc.cluster.local"); let metrics = client::http1(proxy.admin, "localhost"); let scrape = metrics.get("/metrics").await; assert!(!scrape.contains(",,")); info!("inbound.get(/hey)"); assert_eq!(client.get("/hey").await, "hello"); let scrape = metrics.get("/metrics").await; assert!(!scrape.contains(",,"), "inbound metrics had double comma"); let client = client::new(proxy.outbound, "tele.test.svc.cluster.local"); info!("outbound.get(/hey)"); assert_eq!(client.get("/hey").await, "hello"); let scrape = metrics.get("/metrics").await; assert!(!scrape.contains(",,"), "outbound metrics had double comma"); } #[tokio::test] async fn metrics_has_start_time() { let Fixture { metrics, proxy: _proxy, _profile, dst_tx: _dst_tx, .. } = Fixture::inbound().await; let uptime_regex = regex::Regex::new(r"process_start_time_seconds \d+") .expect("compiling regex shouldn't fail"); assert_eventually!(uptime_regex.find(&metrics.get("/metrics").await).is_some()) } mod transport { use super::*; use crate::*; async fn test_http_connect( fixture: impl Future<Output = Fixture>, extra_labels: metrics::Labels, ) { let _trace = trace_init(); let Fixture { client, metrics, proxy: _proxy, _profile, dst_tx: _dst_tx, tcp_dst_labels, .. 
} = fixture.await; let labels = tcp_dst_labels.and(extra_labels); let opens = labels.metric("tcp_open_total").value(1u64); info!("client.get(/)"); assert_eq!(client.get("/").await, "hello"); opens.assert_in(&metrics).await; info!("client.get(/)"); assert_eq!(client.get("/").await, "hello"); // Pooled connection doesn't increment the metric. opens.assert_in(&metrics).await; } async fn test_http_accept( fixture: impl Future<Output = Fixture>, extra_labels: metrics::Labels, ) { let _trace = trace_init(); let Fixture { client, metrics, proxy: _proxy, _profile, dst_tx: _dst_tx, tcp_src_labels, .. } = fixture.await; let labels = tcp_src_labels.and(extra_labels); let mut opens = labels.metric("tcp_open_total").value(1u64); let mut closes = labels .metric("tcp_close_total") .label("errno", "") .value(1u64); info!("client.get(/)"); assert_eq!(client.get("/").await, "hello"); opens.assert_in(&metrics).await; // Shut down the client to force the connection to close. let new_client = client.shutdown().await; closes.assert_in(&metrics).await; // create a new client to force a new connection let client = new_client.reconnect(); info!("client.get(/)"); assert_eq!(client.get("/").await, "hello"); opens.set_value(2u64).assert_in(&metrics).await; // Shut down the client to force the connection to close. client.shutdown().await; closes.set_value(2u64).assert_in(&metrics).await; } async fn test_tcp_connect(fixture: impl Future<Output = TcpFixture>) { let _trace = trace_init(); let TcpFixture { client, metrics, proxy: _proxy, dst: _dst, profile: _profile, dst_labels, .. } = fixture.await; let tcp_client = client.connect().await; tcp_client.write(TcpFixture::HELLO_MSG).await; assert_eq!(tcp_client.read().await, TcpFixture::BYE_MSG.as_bytes()); dst_labels .metric("tcp_open_total") .value(1u64) .assert_in(&metrics) .await; } async fn test_tcp_accept(fixture: impl Future<Output = TcpFixture>) { let _trace = trace_init(); let TcpFixture { client, metrics, proxy: _proxy, dst: _dst, profile: _profile, src_labels, .. 
} = fixture.await; let tcp_client = client.connect().await; tcp_client.write(TcpFixture::HELLO_MSG).await; assert_eq!(tcp_client.read().await, TcpFixture::BYE_MSG.as_bytes()); let mut opens = src_labels.metric("tcp_open_total").value(1u64); let mut closes = src_labels .metric("tcp_close_total") .label("errno", "") .value(1u64); opens.assert_in(&metrics).await; tcp_client.shutdown().await; closes.assert_in(&metrics).await; let tcp_client = client.connect().await; tcp_client.write(TcpFixture::HELLO_MSG).await; assert_eq!(tcp_client.read().await, TcpFixture::BYE_MSG.as_bytes()); opens.set_value(2u64).assert_in(&metrics).await; tcp_client.shutdown().await; closes.set_value(2u64).assert_in(&metrics).await; } async fn test_write_bytes_total(fixture: impl Future<Output = TcpFixture>) { let _trace = trace_init(); let TcpFixture { client, metrics, proxy: _proxy, dst: _dst, profile: _profile, src_labels, dst_labels, } = fixture.await; let src = src_labels .metric("tcp_write_bytes_total") .value(TcpFixture::BYE_MSG.len()); let dst = dst_labels .metric("tcp_write_bytes_total") .value(TcpFixture::HELLO_MSG.len()); let tcp_client = client.connect().await; tcp_client.write(TcpFixture::HELLO_MSG).await; assert_eq!(tcp_client.read().await, TcpFixture::BYE_MSG.as_bytes()); tcp_client.shutdown().await; src.assert_in(&metrics).await; dst.assert_in(&metrics).await; } async fn test_read_bytes_total(fixture: impl Future<Output = TcpFixture>) { let _trace = trace_init(); let TcpFixture { client, metrics, proxy: _proxy, dst: _dst, profile: _profile, src_labels, dst_labels, } = fixture.await; let src = src_labels .metric("tcp_read_bytes_total") .value(TcpFixture::HELLO_MSG.len()); let dst = dst_labels .metric("tcp_read_bytes_total") .value(TcpFixture::BYE_MSG.len()); let tcp_client = client.connect().await; tcp_client.write(TcpFixture::HELLO_MSG).await; assert_eq!(tcp_client.read().await, TcpFixture::BYE_MSG.as_bytes()); tcp_client.shutdown().await; src.assert_in(&metrics).await; dst.assert_in(&metrics).await; } async fn test_tcp_open_conns(fixture: impl Future<Output = TcpFixture>) { let _trace = trace_init(); let fixture = fixture.await; let client = fixture.client; let metrics = fixture.metrics; let mut open_conns = fixture.src_labels.metric("tcp_open_connections"); let tcp_client = client.connect().await; tcp_client.write(TcpFixture::HELLO_MSG).await; assert_eq!(tcp_client.read().await, TcpFixture::BYE_MSG.as_bytes()); open_conns.set_value(1).assert_in(&metrics).await; tcp_client.shutdown().await; open_conns.set_value(0).assert_in(&metrics).await; let tcp_client = client.connect().await; tcp_client.write(TcpFixture::HELLO_MSG).await; assert_eq!(tcp_client.read().await, TcpFixture::BYE_MSG.as_bytes()); open_conns.set_value(1).assert_in(&metrics).await; tcp_client.shutdown().await; open_conns.set_value(0).assert_in(&metrics).await; } async fn test_http_open_conns( fixture: impl Future<Output = Fixture>, extra_labels: metrics::Labels, ) { let _trace = trace_init(); let Fixture { client, metrics, proxy: _proxy, _profile, dst_tx: _dst_tx, tcp_src_labels, .. } = fixture.await; let mut open_conns = tcp_src_labels .and(extra_labels) .metric("tcp_open_connections"); info!("client.get(/)"); assert_eq!(client.get("/").await, "hello"); open_conns.set_value(1).assert_in(&metrics).await; // Shut down the client to force the connection to close. 
let new_client = client.shutdown().await; open_conns.set_value(0).assert_in(&metrics).await; // create a new client to force a new connection let client = new_client.reconnect(); info!("client.get(/)"); assert_eq!(client.get("/").await, "hello"); open_conns.set_value(1).assert_in(&metrics).await; // Shut down the client to force the connection to close. client.shutdown().await; open_conns.set_value(0).assert_in(&metrics).await; } #[tokio::test] async fn inbound_http_accept() { test_http_accept(Fixture::inbound(), metrics::labels()).await; } #[tokio::test] async fn inbound_http_connect() { test_http_connect( Fixture::inbound(), metrics::labels() .label("tls", "no_identity") .label("no_tls_reason", "loopback"), ) .await; } #[tokio::test] async fn outbound_http_accept() { test_http_accept( Fixture::outbound(), metrics::labels() .label("tls", "no_identity") .label("no_tls_reason", "loopback"), ) .await; } #[tokio::test] async fn outbound_http_connect() { test_http_connect(Fixture::outbound(), metrics::labels()).await; } #[tokio::test] async fn inbound_tcp_connect() { test_tcp_connect(TcpFixture::inbound()).await } #[tokio::test] async fn inbound_tcp_accept() { test_tcp_accept(TcpFixture::inbound()).await } #[tokio::test] async fn outbound_tcp_connect() { test_tcp_connect(TcpFixture::outbound()).await; } #[cfg_attr(not(feature = "flakey-in-coverage"), ignore)] #[tokio::test] async fn outbound_tcp_accept() { test_tcp_accept(TcpFixture::outbound()).await; } #[tokio::test] async fn inbound_tcp_write_bytes_total() { test_write_bytes_total(TcpFixture::inbound()).await } #[tokio::test] async fn inbound_tcp_read_bytes_total() { test_read_bytes_total(TcpFixture::inbound()).await } #[tokio::test] async fn outbound_tcp_write_bytes_total() { test_write_bytes_total(TcpFixture::outbound()).await } #[tokio::test] async fn outbound_tcp_read_bytes_total() { test_read_bytes_total(TcpFixture::outbound()).await } #[cfg_attr(not(feature = "flakey-in-coverage"), ignore)] #[tokio::test] async fn outbound_tcp_open_connections() { test_tcp_open_conns(TcpFixture::outbound()).await } #[tokio::test] async fn inbound_tcp_open_connections() { test_tcp_open_conns(TcpFixture::inbound()).await } #[tokio::test] async fn outbound_http_tcp_open_connections() { test_http_open_conns( Fixture::outbound(), metrics::labels() .label("direction", "outbound") .label("tls", "no_identity") .label("no_tls_reason", "loopback"), ) .await } #[tokio::test] async fn inbound_http_tcp_open_connections() { test_http_open_conns( Fixture::inbound(), metrics::labels().label("direction", "inbound"), ) .await } } // linkerd/linkerd2#613 #[tokio::test] async fn metrics_compression() { let _trace = trace_init(); let Fixture { client, metrics, proxy: _proxy, _profile, dst_tx: _dst_tx, labels, .. } = Fixture::inbound().await; let do_scrape = |encoding: &str| { let req = metrics.request( metrics
.header("Accept-Encoding", encoding), ); let encoding = encoding.to_owned(); async move { let resp = req.await.expect("scrape"); { // create a new scope so we can release our borrow on `resp` before // getting the body let content_encoding = resp.headers().get("content-encoding").as_ref().map(|val| { val.to_str() .expect("content-encoding value should be ascii") }); assert_eq!( content_encoding, Some("gzip"), "unexpected Content-Encoding {:?} (requested Accept-Encoding: {})", content_encoding, encoding.as_str() ); } let mut body = hyper::body::aggregate(resp.into_body()) .await .expect("response body concat"); let mut decoder = flate2::read::GzDecoder::new(std::io::Cursor::new( body.copy_to_bytes(body.remaining()), )); let mut scrape = String::new(); decoder.read_to_string(&mut scrape).unwrap_or_else(|_| { panic!("decode gzip (requested Accept-Encoding: {})", encoding) }); scrape } }; let encodings = &[ "gzip", "deflate, gzip", "gzip,deflate", "brotli,gzip,deflate", ]; info!("client.get(/)"); assert_eq!(client.get("/").await, "hello"); let mut metric = labels .metric("response_latency_ms_count") .label("status_code", 200) .value(1u64); for &encoding in encodings { assert_eventually_contains!(do_scrape(encoding).await, &metric); } info!("client.get(/)"); assert_eq!(client.get("/").await, "hello"); for &encoding in encodings { assert_eventually_contains!(do_scrape(encoding).await, metric.set_value(2u64)); } }
.request_builder("/metrics") .method("GET")
models.py
""" Transformer encoder / decoder layer chain """ import numpy as np import tensorflow as tf import lib.layers from . import layers, ops from .data import linelen class Transformer: def __init__( self, name, inp_voc, out_voc, logits_bias=False, share_emb=False, dst_rand_offset=False, rescale_emb=True, inp_emb_bias=False, emb_inp_device='', emb_out_device='', **kwargs ): """ Transformer-based model that predicts logp(insert(i, token) | x, y) :type inp_voc: lib.voc.Voc :type out_voc: lib.voc.Voc :param logits_bias: if True, final logits layer has bias term. :param share_emb: if True, input and output embeddings will use the same matrix. Useful for in case of shared vocabularies or when there is a :param dst_rand_offset: if True, adds a random offset to output embeddings, same for all positions :param kwargs: other hyperparameters - see TransformerChain and TransformerEmbedding """ self.name = name self.inp_voc, self.out_voc = inp_voc, out_voc self.dst_rand_offset = dst_rand_offset self.hp = kwargs emb_size = kwargs.get('emb_size', kwargs.get('hid_size', 512)) max_voc_size = max(len(inp_voc), len(out_voc)) with tf.variable_scope(self.name) as self.scope: # Embeddings self.emb_inp = layers.TransformerEmbedding( 'emb_inp', max_voc_size if share_emb else len(inp_voc), emb_size, bias=inp_emb_bias, rescale=rescale_emb, device=emb_inp_device)
'emb_out', max_voc_size if share_emb else len(out_voc), emb_size, matrix=self.emb_inp.emb.mat if share_emb else None, rescale=rescale_emb, device=emb_out_device) # Model body self.encoder = layers.TransformerChain('enc', **kwargs) self.decoder = layers.TransformerChain('dec', attn_inputs=['enc'], **kwargs) # logits: token insertions plus one extra logit to predict position where to insert self.logits = layers.Dense( 'logits', kwargs['hid_size'], len(out_voc) + 1, matrix=tf.transpose(self.emb_out.emb.mat) if kwargs.get('dwwt', False) else None, bias=None if logits_bias else 0 ) def _get_batch_sample(self): """ A minimal example of model input data """ return [("i saw a cat", "i write the code")] def make_encoder_batch_ph(self): return { 'inp': tf.placeholder('int32', [None, None]), 'inp_len': tf.placeholder('int32', [None]) } def make_feed_dict(self, batch, **kwargs): """ Take input data strings, return a dict { key: np.array(value) } """ inp_lines, out_lines = zip(*batch) inp_len = [linelen(line) for line in inp_lines] out_len = [linelen(line) for line in out_lines] return { 'inp': self.inp_voc.to_matrix(inp_lines), 'inp_len': np.array(inp_len, 'int32'), 'out': self.out_voc.to_matrix(out_lines), 'out_len': np.array(out_len, 'int32') } def encode(self, batch, is_train): """ Take placeholders for data batch, return encoder state """ with tf.name_scope(self.name), ops.dropout_scope(is_train): inp = batch['inp'] # [batch_size * ninp] inp_len = batch.get('inp_len', ops.infer_length(inp, self.inp_voc.eos)) # [batch] attn_mask = ops.make_attn_mask(inp, inp_len) # [batch_size, 1, 1, ninp] out, _ = self.encoder(self.emb_inp(inp), self_attn_mask=attn_mask) # ^-- [batch_size, ninp, hid_size] return dict(out=out, attn_mask=attn_mask) def compute_action_logprobs(self, batch, is_train, enc=None, temperature=None): """ Compute log-probabilities for all possible actions (aka agent policy) :param batch: a dict with - token matrix 'out'[batch_size, output_length] - optional length vector out_len[batch_size] :param is_train: whether or not to use training behavior (e.g. dropout) :returns: {'insert':logp(insert(i, c) | x, y), 'finish':logp(terminate| x, y)} """ enc = self.encode(batch, is_train) if enc is None else enc with tf.name_scope(self.name), ops.dropout_scope(is_train): out = batch['out'] # partial translation, shape: [batch_size * nout] out_len = batch.get('out_len', ops.infer_length(out, self.out_voc.eos)) # [batch] # embedding. Note: at this point, a special "zero" vector is added # to the first position hence length is increased by 1 out_padded = tf.concat([tf.zeros_like(out[:, :1]), out], axis=1) # [batch_size, nout+1] dec_emb = self.emb_out(out_padded, offset='random' if self.dst_rand_offset else 0) # ^-- shape: [batch_size, nout + 1] # run decoder attn_mask = ops.make_attn_mask(out_padded, out_len + 1) # [batch_size, 1, 1, nout + 1] dec_out, _ = self.decoder(dec_emb, self_attn_mask=attn_mask, enc_out=enc['out'], enc_attn_mask=enc['attn_mask']) # ^-- [batch_size, nout + 1, hid_size] logits = self.logits(dec_out) # [batch_size, nout + 1, voc_size + 1] if temperature is not None: logits /= temperature # compute log-probabilities for actions # position log-probabilities, logP(insert(pos, *) | ...) 
# used to predict position of next insert and termination condition (EOS) position_logits = logits[:, :, -1] # [batch_size, nout + 1] position_mask = tf.cast(attn_mask, tf.bool)[:, 0, 0, :] # [batch_size, nout + 1] position_logits = tf.where(position_mask, position_logits, tf.fill(tf.shape(position_logits), -1e9)) position_logp = tf.nn.log_softmax(position_logits, axis=-1) # [batch_size, n_out] # two actions: insert - at any non-EOS position - or finish - defined as inserting at EOS finish_logp = tf.gather_nd(position_logp, tf.stack([tf.range(tf.shape(out_len)[0]), out_len], axis=1)) # ^-- [batch_size] insert_position_logp = tf.where(position_mask[:, 1:], position_logp[:, :-1], tf.fill(tf.shape(position_logp[:, :-1]), -1e9)) # ^-- [batch_size, nout] # insertion log-probabilities: # logP(insert(pos, tok) | ...) = logP(insert(pos, *) | ...) + logP(insert(pos, tok) | insert(pos, *), ...) token_logits = logits[:, :-1, :len(self.out_voc)] # [batch_size, n_out, voc_size] token_logp_given_position = tf.nn.log_softmax(token_logits, axis=-1) # note: we do not need mask on token_logp_given_position cuz mask is already applied to insert_position_logp insert_logp = insert_position_logp[:, :, None] + token_logp_given_position return { # group 1 (exps sum to 1) 'insert': insert_logp, # [batch_size, nout, voc_size] 'finish': finish_logp, # [batch_size] } class ImgToSeqTransformer(Transformer): def __init__( self, name, out_voc, inp_w, inp_h, inp_channels=3, make_encoder=lib.layers.ImageEncoder, logits_bias=False, share_emb=False, dst_rand_offset=False, rescale_emb=True, emb_out_device='', **kwargs ): """ Transformer-based model that predicts logp(insert(i, token) | x, y) :type out_voc: lib.voc.Voc :param logits_bias: if True, final logits layer has bias term. :param dst_rand_offset: if True, adds a random offset to output embeddings, same for all positions :param kwargs: other hyperparameters - see TransformerChain and TransformerEmbedding """ self.name = name self.inp_voc, self.out_voc = out_voc, out_voc # inp voc is a stub, the same as out_voc self.dst_rand_offset = dst_rand_offset self.hp = kwargs self.w = inp_w self.h = inp_h self.inp_channels = inp_channels emb_size = kwargs.get('emb_size', kwargs.get('hid_size', 512)) max_voc_size = len(out_voc) with tf.variable_scope(self.name) as self.scope: # Embeddings self.emb_out = layers.TransformerEmbedding( 'emb_out', max_voc_size if share_emb else len(out_voc), emb_size, matrix=self.emb_inp.emb.mat if share_emb else None, rescale=rescale_emb, device=emb_out_device) # Model body self.encoder = make_encoder('enc', inp_h=inp_w, inp_w=inp_h, inp_channels=inp_channels, **kwargs) enc_out_shape = self.encode(self.make_encoder_batch_ph(), True)['out'].shape assert enc_out_shape.ndims == 3 and enc_out_shape[-1].value is not None, \ "encoder output shape must be a 3d tensor with fixed num units, " \ "got shape {}".format(enc_out_shape) self.decoder = layers.TransformerChain('dec', attn_inputs=['enc'], attn_input_sizes={'enc': enc_out_shape[-1].value}, **kwargs) # logits: token insertions plus one extra logit to predict position where to insert self.logits = layers.Dense( 'logits', kwargs['hid_size'], len(out_voc) + 1, bias=None if logits_bias else 0 ) def _get_batch_sample(self): """ A minimal example of model input data """ return [(np.zeros((self.h, self.w, self.inp_channels)), 'A cat sat')] def make_feed_dict(self, batch, **kwargs): """ Take input data strings, return a dict { key: np.array(value) } """ inp_imgs, out_lines = zip(*batch) out_len = [linelen(line) 
for line in out_lines] return { 'inp': np.array(inp_imgs, 'float32'), 'out': self.out_voc.to_matrix(out_lines), 'out_len': np.array(out_len, 'int32') } def make_encoder_batch_ph(self): return { 'inp': tf.placeholder('float32', [None, self.h, self.w, self.inp_channels]), } def encode(self, batch, is_train): """ Take placeholders for data batch, return encoder state """ with tf.name_scope(self.name), ops.dropout_scope(is_train): inp = batch['inp'] # [batch_size * ninp] out = self.encoder(inp) assert out.shape[-1] is not None out_shape = tf.shape(out) out = tf.reshape(out, [out_shape[0], -1, out.shape[-1]]) attn_mask = tf.ones((out_shape[0], 1, 1, out_shape[1] * out_shape[2])) # [batch_size, 1, 1, ninp] return dict(out=out, attn_mask=attn_mask)
self.emb_out = layers.TransformerEmbedding(
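# Editor's note: the lines below are a minimal, hypothetical usage sketch of the insertion
# Transformer policy defined above; they are not part of the original models.py. The sketch
# assumes it runs in the context of this module (TF1 graph mode), that `inp_voc` and `out_voc`
# are lib.voc.Voc instances, and that the hyperparameters shown are illustrative only.
model = Transformer('ins_model', inp_voc, out_voc, hid_size=256)
batch_ph = {
    'inp': tf.placeholder('int32', [None, None]),
    'inp_len': tf.placeholder('int32', [None]),
    'out': tf.placeholder('int32', [None, None]),
    'out_len': tf.placeholder('int32', [None]),
}
logp = model.compute_action_logprobs(batch_ph, is_train=False)
# logp['insert'] has shape [batch, nout, voc_size]; logp['finish'] has shape [batch]
feed = model.make_feed_dict(model._get_batch_sample())
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    insert_logp, finish_logp = sess.run(
        [logp['insert'], logp['finish']],
        {batch_ph[k]: v for k, v in feed.items()})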
TTS.py
from ibm_watson import TextToSpeechV1 from ibm_cloud_sdk_core.authenticators import IAMAuthenticator def main(text,audio): authenticator = IAMAuthenticator('7-KyTRyrBXQSuRQO7wazH5Q-Q_5QzDs6R0qOZqD1hyu6') text_to_speech = TextToSpeechV1( authenticator=authenticator ) text_to_speech.set_service_url('https://api.us-east.text-to-speech.watson.cloud.ibm.com') audio_file = open(audio, 'wb') res = text_to_speech.synthesize(text, accept='audio/mp3', voice='en-US_AllisonV3Voice').get_result() audio_file.write(res.content) audio_file.close() if __name__ == "__main__":
main('In order to get a final response from STT we send a stop, this will force a final=True return message.','./output.mp3')
lib.rs
// DO NOT EDIT ! // This file was generated automatically from 'src/mako/api/lib.rs.mako' // DO NOT EDIT ! //! This documentation was generated from *Dataproc* crate version *1.0.8+20181005*, where *20181005* is the exact revision of the *dataproc:v1* schema built by the [mako](http://www.makotemplates.org/) code generator *v1.0.8*. //! //! Everything else about the *Dataproc* *v1* API can be found at the //! [official documentation site](https://cloud.google.com/dataproc/). //! The original source code is [on github](https://github.com/Byron/google-apis-rs/tree/master/gen/dataproc1). //! # Features //! //! Handle the following *Resources* with ease from the central [hub](struct.Dataproc.html) ... //! //! * projects //! * [*locations workflow templates create*](struct.ProjectLocationWorkflowTemplateCreateCall.html), [*locations workflow templates delete*](struct.ProjectLocationWorkflowTemplateDeleteCall.html), [*locations workflow templates get*](struct.ProjectLocationWorkflowTemplateGetCall.html), [*locations workflow templates get iam policy*](struct.ProjectLocationWorkflowTemplateGetIamPolicyCall.html), [*locations workflow templates instantiate*](struct.ProjectLocationWorkflowTemplateInstantiateCall.html), [*locations workflow templates instantiate inline*](struct.ProjectLocationWorkflowTemplateInstantiateInlineCall.html), [*locations workflow templates list*](struct.ProjectLocationWorkflowTemplateListCall.html), [*locations workflow templates set iam policy*](struct.ProjectLocationWorkflowTemplateSetIamPolicyCall.html), [*locations workflow templates test iam permissions*](struct.ProjectLocationWorkflowTemplateTestIamPermissionCall.html), [*locations workflow templates update*](struct.ProjectLocationWorkflowTemplateUpdateCall.html), [*regions clusters create*](struct.ProjectRegionClusterCreateCall.html), [*regions clusters delete*](struct.ProjectRegionClusterDeleteCall.html), [*regions clusters diagnose*](struct.ProjectRegionClusterDiagnoseCall.html), [*regions clusters get*](struct.ProjectRegionClusterGetCall.html), [*regions clusters get iam policy*](struct.ProjectRegionClusterGetIamPolicyCall.html), [*regions clusters list*](struct.ProjectRegionClusterListCall.html), [*regions clusters patch*](struct.ProjectRegionClusterPatchCall.html), [*regions clusters set iam policy*](struct.ProjectRegionClusterSetIamPolicyCall.html), [*regions clusters test iam permissions*](struct.ProjectRegionClusterTestIamPermissionCall.html), [*regions jobs cancel*](struct.ProjectRegionJobCancelCall.html), [*regions jobs delete*](struct.ProjectRegionJobDeleteCall.html), [*regions jobs get*](struct.ProjectRegionJobGetCall.html), [*regions jobs get iam policy*](struct.ProjectRegionJobGetIamPolicyCall.html), [*regions jobs list*](struct.ProjectRegionJobListCall.html), [*regions jobs patch*](struct.ProjectRegionJobPatchCall.html), [*regions jobs set iam policy*](struct.ProjectRegionJobSetIamPolicyCall.html), [*regions jobs submit*](struct.ProjectRegionJobSubmitCall.html), [*regions jobs test iam permissions*](struct.ProjectRegionJobTestIamPermissionCall.html), [*regions operations cancel*](struct.ProjectRegionOperationCancelCall.html), [*regions operations delete*](struct.ProjectRegionOperationDeleteCall.html), [*regions operations get*](struct.ProjectRegionOperationGetCall.html), [*regions operations get iam policy*](struct.ProjectRegionOperationGetIamPolicyCall.html), [*regions operations list*](struct.ProjectRegionOperationListCall.html), [*regions operations set iam 
policy*](struct.ProjectRegionOperationSetIamPolicyCall.html), [*regions operations test iam permissions*](struct.ProjectRegionOperationTestIamPermissionCall.html), [*regions workflow templates create*](struct.ProjectRegionWorkflowTemplateCreateCall.html), [*regions workflow templates delete*](struct.ProjectRegionWorkflowTemplateDeleteCall.html), [*regions workflow templates get*](struct.ProjectRegionWorkflowTemplateGetCall.html), [*regions workflow templates get iam policy*](struct.ProjectRegionWorkflowTemplateGetIamPolicyCall.html), [*regions workflow templates instantiate*](struct.ProjectRegionWorkflowTemplateInstantiateCall.html), [*regions workflow templates instantiate inline*](struct.ProjectRegionWorkflowTemplateInstantiateInlineCall.html), [*regions workflow templates list*](struct.ProjectRegionWorkflowTemplateListCall.html), [*regions workflow templates set iam policy*](struct.ProjectRegionWorkflowTemplateSetIamPolicyCall.html), [*regions workflow templates test iam permissions*](struct.ProjectRegionWorkflowTemplateTestIamPermissionCall.html) and [*regions workflow templates update*](struct.ProjectRegionWorkflowTemplateUpdateCall.html) //! //! //! //! //! Not what you are looking for ? Find all other Google APIs in their Rust [documentation index](http://byron.github.io/google-apis-rs). //! //! # Structure of this Library //! //! The API is structured into the following primary items: //! //! * **[Hub](struct.Dataproc.html)** //! * a central object to maintain state and allow accessing all *Activities* //! * creates [*Method Builders*](trait.MethodsBuilder.html) which in turn //! allow access to individual [*Call Builders*](trait.CallBuilder.html) //! * **[Resources](trait.Resource.html)** //! * primary types that you can apply *Activities* to //! * a collection of properties and *Parts* //! * **[Parts](trait.Part.html)** //! * a collection of properties //! * never directly used in *Activities* //! * **[Activities](trait.CallBuilder.html)** //! * operations to apply to *Resources* //! //! All *structures* are marked with applicable traits to further categorize them and ease browsing. //! //! Generally speaking, you can invoke *Activities* like this: //! //! ```Rust,ignore //! let r = hub.resource().activity(...).doit() //! ``` //! //! Or specifically ... //! //! ```ignore //! let r = hub.projects().regions_clusters_get_iam_policy(...).doit() //! let r = hub.projects().regions_workflow_templates_set_iam_policy(...).doit() //! let r = hub.projects().regions_clusters_set_iam_policy(...).doit() //! let r = hub.projects().regions_workflow_templates_get_iam_policy(...).doit() //! let r = hub.projects().regions_jobs_get_iam_policy(...).doit() //! let r = hub.projects().regions_operations_get_iam_policy(...).doit() //! let r = hub.projects().regions_operations_set_iam_policy(...).doit() //! let r = hub.projects().locations_workflow_templates_set_iam_policy(...).doit() //! let r = hub.projects().regions_jobs_set_iam_policy(...).doit() //! let r = hub.projects().locations_workflow_templates_get_iam_policy(...).doit() //! ``` //! //! The `resource()` and `activity(...)` calls create [builders][builder-pattern]. The second one dealing with `Activities` //! supports various methods to configure the impending operation (not shown here). It is made such that all required arguments have to be //! specified right away (i.e. `(...)`), whereas all optional ones can be [build up][builder-pattern] as desired. //! 
The `doit()` method performs the actual communication with the server and returns the respective result. //! //! # Usage //! //! ## Setting up your Project //! //! To use this library, you would put the following lines into your `Cargo.toml` file: //! //! ```toml //! [dependencies] //! google-dataproc1 = "*" //! # This project intentionally uses an old version of Hyper. See //! # https://github.com/Byron/google-apis-rs/issues/173 for more //! # information. //! hyper = "^0.10" //! hyper-rustls = "^0.6" //! serde = "^1.0" //! serde_json = "^1.0" //! yup-oauth2 = "^1.0" //! ``` //! //! ## A complete example //! //! ```test_harness,no_run //! extern crate hyper; //! extern crate hyper_rustls; //! extern crate yup_oauth2 as oauth2; //! extern crate google_dataproc1 as dataproc1; //! use dataproc1::GetIamPolicyRequest; //! use dataproc1::{Result, Error}; //! # #[test] fn egal() { //! use std::default::Default; //! use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; //! use dataproc1::Dataproc; //! //! // Get an ApplicationSecret instance by some means. It contains the `client_id` and //! // `client_secret`, among other things. //! let secret: ApplicationSecret = Default::default(); //! // Instantiate the authenticator. It will choose a suitable authentication flow for you, //! // unless you replace `None` with the desired Flow. //! // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about //! // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and //! // retrieve them from storage. //! let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, //! hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), //! <MemoryStorage as Default>::default(), None); //! let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); //! // As the method needs a request, you would usually fill it with the desired information //! // into the respective structure. Some of the parts shown here might not be applicable ! //! // Values shown here are possibly random and not representative ! //! let mut req = GetIamPolicyRequest::default(); //! //! // You can configure optional parameters by calling the respective setters at will, and //! // execute the final call using `doit()`. //! // Values shown here are possibly random and not representative ! //! let result = hub.projects().regions_clusters_get_iam_policy(req, "resource") //! .doit(); //! //! match result { //! Err(e) => match e { //! // The Error enum provides details about what exactly happened. //! // You can also just use its `Debug`, `Display` or `Error` traits //! Error::HttpError(_) //! |Error::MissingAPIKey //! |Error::MissingToken(_) //! |Error::Cancelled //! |Error::UploadSizeLimitExceeded(_, _) //! |Error::Failure(_) //! |Error::BadRequest(_) //! |Error::FieldClash(_) //! |Error::JsonDecodeError(_, _) => println!("{}", e), //! }, //! Ok(res) => println!("Success: {:?}", res), //! } //! # } //! ``` //! ## Handling Errors //! //! All errors produced by the system are provided either as [Result](enum.Result.html) enumeration as return value of //! the doit() methods, or handed as possibly intermediate results to either the //! [Hub Delegate](trait.Delegate.html), or the [Authenticator Delegate](https://docs.rs/yup-oauth2/*/yup_oauth2/trait.AuthenticatorDelegate.html). //! //! 
When delegates handle errors or intermediate values, they may have a chance to instruct the system to retry. This
//! makes the system potentially resilient to all kinds of errors.
//!
//! ## Uploads and Downloads
//! If a method supports downloads, the response body, which is part of the [Result](enum.Result.html), should be
//! read by you to obtain the media.
//! If such a method also supports a [Response Result](trait.ResponseResult.html), it will return that by default.
//! You can see it as meta-data for the actual media. To trigger a media download, you will have to set up the builder by making
//! this call: `.param("alt", "media")`.
//!
//! Methods supporting uploads can do so using up to 2 different protocols:
//! *simple* and *resumable*. The distinctiveness of each is represented by customized
//! `doit(...)` methods, which are then named `upload(...)` and `upload_resumable(...)` respectively.
//!
//! ## Customization and Callbacks
//!
//! You may alter the way a `doit()` method is called by providing a [delegate](trait.Delegate.html) to the
//! [Method Builder](trait.CallBuilder.html) before making the final `doit()` call.
//! Respective methods will be called to provide progress information, as well as determine whether the system should
//! retry on failure.
//!
//! The [delegate trait](trait.Delegate.html) is default-implemented, allowing you to customize it with minimal effort.
//!
//! ## Optional Parts in Server-Requests
//!
//! All structures provided by this library are made to be [encodable](trait.RequestValue.html) and
//! [decodable](trait.ResponseResult.html) via *json*. Optionals are used to indicate that partial requests and responses
//! are valid.
//! Most optionals are considered [Parts](trait.Part.html) which are identifiable by name, which will be sent to
//! the server to indicate either the set parts of the request or the desired parts in the response.
//!
//! ## Builder Arguments
//!
//! Using [method builders](trait.CallBuilder.html), you are able to prepare an action call by repeatedly calling its methods.
//! These will always take a single argument, for which the following statements are true.
//!
//! * [PODs][wiki-pod] are handed by copy
//! * strings are passed as `&str`
//! * [request values](trait.RequestValue.html) are moved
//!
//! Arguments will always be copied or cloned into the builder, to make them independent of their original lifetimes.
//!
//! [wiki-pod]: http://en.wikipedia.org/wiki/Plain_old_data_structure
//! [builder-pattern]: http://en.wikipedia.org/wiki/Builder_pattern
//! [google-go-api]: https://github.com/google/google-api-go-client
//!
//!
// Unused attributes happen thanks to defined, but unused structures
// We don't warn about this, as depending on the API, some data structures or facilities are never used.
// Instead of pre-determining this, we just disable the lint. It's manually tuned to not have any
// unused imports in fully featured APIs. Same with unused_mut ... .
#![allow(unused_imports, unused_mut, dead_code)]
// DO NOT EDIT !
// This file was generated automatically from 'src/mako/api/lib.rs.mako'
// DO NOT EDIT !
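// The function below is an illustrative sketch rather than generated output: it shows how the
// plain-data schema types defined further down in this file can be assembled before being handed
// to a call builder such as `regions_jobs_submit(...)`. The cluster name and queries are
// placeholder values chosen for the example.
#[allow(dead_code)]
fn example_submit_job_request() -> SubmitJobRequest {
    // A HiveJob whose queries are supplied inline through a QueryList.
    let mut hive = HiveJob::default();
    hive.query_list = Some(QueryList {
        queries: Some(vec!["SHOW DATABASES".to_string(), "SHOW TABLES".to_string()]),
    });

    // Every job needs a placement naming the cluster it should run on.
    let mut job = Job::default();
    job.hive_job = Some(hive);
    job.placement = Some(JobPlacement {
        cluster_name: Some("example-cluster".to_string()),
        cluster_uuid: None,
    });

    // Wrap the job in the request type expected by the submit call.
    let mut request = SubmitJobRequest::default();
    request.job = Some(job);
    request
}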
#[macro_use]
extern crate serde_derive;

extern crate hyper;
extern crate serde;
extern crate serde_json;
extern crate yup_oauth2 as oauth2;
extern crate mime;
extern crate url;

mod cmn;

use std::collections::HashMap;
use std::cell::RefCell;
use std::borrow::BorrowMut;
use std::default::Default;
use std::collections::BTreeMap;
use serde_json as json;
use std::io;
use std::fs;
use std::mem;
use std::thread::sleep;
use std::time::Duration;

pub use cmn::{MultiPartReader, ToParts, MethodInfo, Result, Error, CallBuilder, Hub, ReadSeek, Part, ResponseResult, RequestValue, NestedType, Delegate, DefaultDelegate, MethodsBuilder, Resource, ErrorResponse, remove_json_null_values};


// ##############
// UTILITIES ###
// ############

/// Identifies an OAuth2 authorization scope.
/// A scope is needed when requesting an
/// [authorization token](https://developers.google.com/youtube/v3/guides/authentication).
#[derive(PartialEq, Eq, Hash)]
pub enum Scope {
    /// View and manage your data across Google Cloud Platform services
    CloudPlatform,
}

impl AsRef<str> for Scope {
    fn as_ref(&self) -> &str {
        match *self {
            Scope::CloudPlatform => "https://www.googleapis.com/auth/cloud-platform",
        }
    }
}

impl Default for Scope {
    fn default() -> Scope {
        Scope::CloudPlatform
    }
}


// ########
// HUB ###
// ######

/// Central instance to access all Dataproc related resource activities
///
/// # Examples
///
/// Instantiate a new hub
///
/// ```test_harness,no_run
/// extern crate hyper;
/// extern crate hyper_rustls;
/// extern crate yup_oauth2 as oauth2;
/// extern crate google_dataproc1 as dataproc1;
/// use dataproc1::GetIamPolicyRequest;
/// use dataproc1::{Result, Error};
/// # #[test] fn egal() {
/// use std::default::Default;
/// use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage};
/// use dataproc1::Dataproc;
///
/// // Get an ApplicationSecret instance by some means. It contains the `client_id` and
/// // `client_secret`, among other things.
/// let secret: ApplicationSecret = Default::default();
/// // Instantiate the authenticator. It will choose a suitable authentication flow for you,
/// // unless you replace `None` with the desired Flow.
/// // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about
/// // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and
/// // retrieve them from storage.
/// let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate,
///                               hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())),
///                               <MemoryStorage as Default>::default(), None);
/// let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = GetIamPolicyRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().regions_clusters_get_iam_policy(req, "resource")
///              .doit();
///
/// match result {
///     Err(e) => match e {
///         // The Error enum provides details about what exactly happened.
/// // You can also just use its `Debug`, `Display` or `Error` traits /// Error::HttpError(_) /// |Error::MissingAPIKey /// |Error::MissingToken(_) /// |Error::Cancelled /// |Error::UploadSizeLimitExceeded(_, _) /// |Error::Failure(_) /// |Error::BadRequest(_) /// |Error::FieldClash(_) /// |Error::JsonDecodeError(_, _) => println!("{}", e), /// }, /// Ok(res) => println!("Success: {:?}", res), /// } /// # } /// ``` pub struct Dataproc<C, A> { client: RefCell<C>, auth: RefCell<A>, _user_agent: String, _base_url: String, _root_url: String, } impl<'a, C, A> Hub for Dataproc<C, A> {} impl<'a, C, A> Dataproc<C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { pub fn new(client: C, authenticator: A) -> Dataproc<C, A> { Dataproc { client: RefCell::new(client), auth: RefCell::new(authenticator), _user_agent: "google-api-rust-client/1.0.8".to_string(), _base_url: "https://dataproc.googleapis.com/".to_string(), _root_url: "https://dataproc.googleapis.com/".to_string(), } } pub fn projects(&'a self) -> ProjectMethods<'a, C, A> { ProjectMethods { hub: &self } } /// Set the user-agent header field to use in all requests to the server. /// It defaults to `google-api-rust-client/1.0.8`. /// /// Returns the previously set user-agent. pub fn user_agent(&mut self, agent_name: String) -> String { mem::replace(&mut self._user_agent, agent_name) } /// Set the base url to use in all requests to the server. /// It defaults to `https://dataproc.googleapis.com/`. /// /// Returns the previously set base url. pub fn base_url(&mut self, new_base_url: String) -> String { mem::replace(&mut self._base_url, new_base_url) } /// Set the root url to use in all requests to the server. /// It defaults to `https://dataproc.googleapis.com/`. /// /// Returns the previously set root url. pub fn root_url(&mut self, new_root_url: String) -> String { mem::replace(&mut self._root_url, new_root_url) } } // ############ // SCHEMAS ### // ########## /// A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/) queries. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct SparkSqlJob { /// The HCFS URI of the script that contains SQL queries. #[serde(rename="queryFileUri")] pub query_file_uri: Option<String>, /// Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";). #[serde(rename="scriptVariables")] pub script_variables: Option<HashMap<String, String>>, /// Optional. The runtime log config for job execution. #[serde(rename="loggingConfig")] pub logging_config: Option<LoggingConfig>, /// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. #[serde(rename="jarFileUris")] pub jar_file_uris: Option<Vec<String>>, /// A list of queries. #[serde(rename="queryList")] pub query_list: Option<QueryList>, /// Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. pub properties: Option<HashMap<String, String>>, } impl Part for SparkSqlJob {} /// Job scheduling options. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct JobScheduling { /// Optional. 
Maximum number of times per hour a driver may be restarted as a result of driver terminating with non-zero code before job is reported failed.A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window.Maximum value is 10. #[serde(rename="maxFailuresPerHour")] pub max_failures_per_hour: Option<i32>, } impl Part for JobScheduling {} /// Optional. The config settings for Compute Engine resources in an instance group, such as a master or worker group. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct InstanceGroupConfig { /// Optional. Specifies that this instance group contains preemptible instances. #[serde(rename="isPreemptible")] pub is_preemptible: Option<bool>, /// Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: /// https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 /// projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 /// n1-standard-2Auto Zone Exception: If you are using the Cloud Dataproc Auto Zone Placement feature, you must use the short name of the machine type resource, for example, n1-standard-2. #[serde(rename="machineTypeUri")] pub machine_type_uri: Option<String>, /// Output only. The list of instance names. Cloud Dataproc derives the names from cluster_name, num_instances, and the instance group. #[serde(rename="instanceNames")] pub instance_names: Option<Vec<String>>, /// Optional. Disk option config settings. #[serde(rename="diskConfig")] pub disk_config: Option<DiskConfig>, /// Optional. The Compute Engine accelerator configuration for these instances.Beta Feature: This feature is still under development. It may be changed before final release. pub accelerators: Option<Vec<AcceleratorConfig>>, /// Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. #[serde(rename="managedGroupConfig")] pub managed_group_config: Option<ManagedGroupConfig>, /// Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1. #[serde(rename="numInstances")] pub num_instances: Option<i32>, /// Optional. The Compute Engine image resource used for cluster instances. It can be specified or may be inferred from SoftwareConfig.image_version. #[serde(rename="imageUri")] pub image_uri: Option<String>, } impl Part for InstanceGroupConfig {} /// A configurable parameter that replaces one or more fields in the template. Parameterizable fields: - Labels - File uris - Job properties - Job arguments - Script variables - Main class (in HadoopJob and SparkJob) - Zone (in ClusterSelector) /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct TemplateParameter { /// Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths.A field path is similar in syntax to a google.protobuf.FieldMask. For example, a field path that references the zone field of a workflow template's cluster selector would be specified as <code>placement.clusterSelector.zone</code>.Also, field paths can reference fields using the following syntax: /// Values in maps can be referenced by key. 
Examples<br>
/// labels['key']
/// placement.clusterSelector.clusterLabels['key']
/// placement.managedCluster.labels['key']
/// placement.clusterSelector.clusterLabels['key']
/// jobs['step-id'].labels['key']
/// Jobs in the jobs list can be referenced by step-id. Examples:<br>
/// jobs['step-id'].hadoopJob.mainJarFileUri
/// jobs['step-id'].hiveJob.queryFileUri
/// jobs['step-id'].pySparkJob.mainPythonFileUri
/// jobs['step-id'].hadoopJob.jarFileUris[0]
/// jobs['step-id'].hadoopJob.archiveUris[0]
/// jobs['step-id'].hadoopJob.fileUris[0]
/// jobs['step-id'].pySparkJob.pythonFileUris[0]
/// Items in repeated fields can be referenced by a zero-based index. Example:<br>
/// jobs['step-id'].sparkJob.args[0]
/// Other examples:
/// jobs['step-id'].hadoopJob.properties['key']
/// jobs['step-id'].hadoopJob.args[0]
/// jobs['step-id'].hiveJob.scriptVariables['key']
/// jobs['step-id'].hadoopJob.mainJarFileUri
/// placement.clusterSelector.zone It may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid:
/// placement.clusterSelector.clusterLabels
/// jobs['step-id'].sparkJob.args
    pub fields: Option<Vec<String>>,
    /// Optional. Validation rules to be applied to this parameter's value.
    pub validation: Option<ParameterValidation>,
    /// Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.
    pub name: Option<String>,
    /// Optional. Brief description of the parameter. Must not exceed 1024 characters.
    pub description: Option<String>,
}

impl Part for TemplateParameter {}


/// A YARN application created by a job. Application information is a subset of <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.Beta Feature: This report is available for testing purposes only. It may be changed before final release.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct YarnApplication {
    /// Required. The numerical progress of the application, from 1 to 100.
    pub progress: Option<f32>,
    /// Required. The application state.
    pub state: Option<String>,
    /// Required. The application name.
    pub name: Option<String>,
    /// Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
    #[serde(rename="trackingUrl")]
    pub tracking_url: Option<String>,
}

impl Part for YarnApplication {}


/// Defines an Identity and Access Management (IAM) policy. It is used to specify access control policies for Cloud Platform resources.A Policy consists of a list of bindings. A binding binds a list of members to a role, where the members can be user accounts, Google groups, Google domains, and service accounts.
A role is a named list of permissions defined by IAM.JSON Example /// { /// "bindings": [ /// { /// "role": "roles/owner", /// "members": [ /// "user:[email protected]", /// "group:[email protected]", /// "domain:google.com", /// "serviceAccount:[email protected]" /// ] /// }, /// { /// "role": "roles/viewer", /// "members": ["user:[email protected]"] /// } /// ] /// } /// YAML Example /// bindings: /// - members: /// - user:[email protected] /// - group:[email protected] /// - domain:google.com /// - serviceAccount:[email protected] /// role: roles/owner /// - members: /// - user:[email protected] /// role: roles/viewer /// For a description of IAM and its features, see the IAM developer's guide (https://cloud.google.com/iam/docs). /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [regions clusters get iam policy projects](struct.ProjectRegionClusterGetIamPolicyCall.html) (response) /// * [regions workflow templates set iam policy projects](struct.ProjectRegionWorkflowTemplateSetIamPolicyCall.html) (response) /// * [regions clusters set iam policy projects](struct.ProjectRegionClusterSetIamPolicyCall.html) (response) /// * [regions workflow templates get iam policy projects](struct.ProjectRegionWorkflowTemplateGetIamPolicyCall.html) (response) /// * [regions jobs get iam policy projects](struct.ProjectRegionJobGetIamPolicyCall.html) (response) /// * [regions operations get iam policy projects](struct.ProjectRegionOperationGetIamPolicyCall.html) (response) /// * [regions operations set iam policy projects](struct.ProjectRegionOperationSetIamPolicyCall.html) (response) /// * [locations workflow templates set iam policy projects](struct.ProjectLocationWorkflowTemplateSetIamPolicyCall.html) (response) /// * [regions jobs set iam policy projects](struct.ProjectRegionJobSetIamPolicyCall.html) (response) /// * [locations workflow templates get iam policy projects](struct.ProjectLocationWorkflowTemplateGetIamPolicyCall.html) (response) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct Policy { /// Associates a list of members to a role. bindings with no members will result in an error. pub bindings: Option<Vec<Binding>>, /// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.If no etag is provided in the call to setIamPolicy, then the existing policy is overwritten blindly. pub etag: Option<String>, /// Deprecated. pub version: Option<i32>, } impl ResponseResult for Policy {} /// A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: /// service Foo { /// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); /// } /// The JSON representation for Empty is empty JSON object {}. 
/// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [regions workflow templates delete projects](struct.ProjectRegionWorkflowTemplateDeleteCall.html) (response) /// * [regions jobs delete projects](struct.ProjectRegionJobDeleteCall.html) (response) /// * [regions operations cancel projects](struct.ProjectRegionOperationCancelCall.html) (response) /// * [locations workflow templates delete projects](struct.ProjectLocationWorkflowTemplateDeleteCall.html) (response) /// * [regions operations delete projects](struct.ProjectRegionOperationDeleteCall.html) (response) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct Empty { _never_set: Option<bool> } impl ResponseResult for Empty {} /// The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). The error model is designed to be: /// Simple to use and understand for most users /// Flexible enough to meet unexpected needsOverviewThe Status message contains three pieces of data: error code, error message, and error details. The error code should be an enum value of google.rpc.Code, but it may accept additional error codes if needed. The error message should be a developer-facing English message that helps developers understand and resolve the error. If a localized user-facing error message is needed, put the localized message in the error details or localize it in the client. The optional error details may contain arbitrary information about the error. There is a predefined set of error detail types in the package google.rpc that can be used for common error conditions.Language mappingThe Status message is the logical representation of the error model, but it is not necessarily the actual wire format. When the Status message is exposed in different client libraries and different wire protocols, it can be mapped differently. For example, it will likely be mapped to some exceptions in Java, but more likely mapped to some error codes in C.Other usesThe error model and the Status message can be used in a variety of environments, either with or without APIs, to provide a consistent developer experience across different environments.Example uses of this error model include: /// Partial errors. If a service needs to return partial errors to the client, it may embed the Status in the normal response to indicate the partial errors. /// Workflow errors. A typical workflow has multiple steps. Each step may have a Status message for error reporting. /// Batch operations. If a client uses batch request and batch response, the Status message should be used directly inside batch response, one for each error sub-response. /// Asynchronous operations. If an API call embeds asynchronous operation results in its response, the status of those operations should be represented directly using the Status message. /// Logging. If some API errors are stored in logs, the message Status could be used directly after any stripping needed for security/privacy reasons. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct Status { /// A developer-facing error message, which should be in English. 
Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. pub message: Option<String>, /// The status code, which should be an enum value of google.rpc.Code. pub code: Option<i32>, /// A list of messages that carry the error details. There is a common set of message types for APIs to use. pub details: Option<Vec<HashMap<String, String>>>, } impl Part for Status {} /// Response message for TestIamPermissions method. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [regions workflow templates test iam permissions projects](struct.ProjectRegionWorkflowTemplateTestIamPermissionCall.html) (response) /// * [locations workflow templates test iam permissions projects](struct.ProjectLocationWorkflowTemplateTestIamPermissionCall.html) (response) /// * [regions operations test iam permissions projects](struct.ProjectRegionOperationTestIamPermissionCall.html) (response) /// * [regions clusters test iam permissions projects](struct.ProjectRegionClusterTestIamPermissionCall.html) (response) /// * [regions jobs test iam permissions projects](struct.ProjectRegionJobTestIamPermissionCall.html) (response) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct TestIamPermissionsResponse { /// A subset of TestPermissionsRequest.permissions that the caller is allowed. pub permissions: Option<Vec<String>>, } impl ResponseResult for TestIamPermissionsResponse {} /// Encapsulates the full scoping used to reference a job. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct JobReference { /// Required. The ID of the Google Cloud Platform project that the job belongs to. #[serde(rename="projectId")] pub project_id: Option<String>, /// Optional. The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters. #[serde(rename="jobId")] pub job_id: Option<String>, } impl Part for JobReference {} /// Common config settings for resources of Compute Engine cluster instances, applicable to all instances in the cluster. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct GceClusterConfig { /// Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. #[serde(rename="internalIpOnly")] pub internal_ip_only: Option<bool>, /// Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. 
Cannot be a "Custom Subnet Network" (see Using Subnetworks for more information).A full URL, partial URI, or short name are valid. Examples: /// https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default /// projects/[project_id]/regions/global/default /// default #[serde(rename="networkUri")] pub network_uri: Option<String>, /// The Compute Engine tags to add to all instances (see Tagging instances). pub tags: Option<Vec<String>>, /// Optional. The service account of the instances. Defaults to the default Compute Engine service account. Custom service accounts need permissions equivalent to the following IAM roles: /// roles/logging.logWriter /// roles/storage.objectAdmin(see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts for more information). Example: [account_id]@[project_id].iam.gserviceaccount.com #[serde(rename="serviceAccount")] pub service_account: Option<String>, /// Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Cloud Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present.A full URL, partial URI, or short name are valid. Examples: /// https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] /// projects/[project_id]/zones/[zone] /// us-central1-f #[serde(rename="zoneUri")] pub zone_uri: Option<String>, /// Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri.A full URL, partial URI, or short name are valid. Examples: /// https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0 /// projects/[project_id]/regions/us-east1/sub0 /// sub0 #[serde(rename="subnetworkUri")] pub subnetwork_uri: Option<String>, /// Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: /// https://www.googleapis.com/auth/cloud.useraccounts.readonly /// https://www.googleapis.com/auth/devstorage.read_write /// https://www.googleapis.com/auth/logging.writeIf no scopes are specified, the following defaults are also provided: /// https://www.googleapis.com/auth/bigquery /// https://www.googleapis.com/auth/bigtable.admin.table /// https://www.googleapis.com/auth/bigtable.data /// https://www.googleapis.com/auth/devstorage.full_control #[serde(rename="serviceAccountScopes")] pub service_account_scopes: Option<Vec<String>>, /// The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). pub metadata: Option<HashMap<String, String>>, } impl Part for GceClusterConfig {} /// Specifies the type and number of accelerator cards attached to the instances of an instance. See GPUs on Compute Engine. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct AcceleratorConfig { /// The number of the accelerator cards of this type exposed to this instance. #[serde(rename="acceleratorCount")] pub accelerator_count: Option<i32>, /// Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. 
See Compute Engine AcceleratorTypes.Examples: /// https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80 /// projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80 /// nvidia-tesla-k80Auto Zone Exception: If you are using the Cloud Dataproc Auto Zone Placement feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80. #[serde(rename="acceleratorTypeUri")] pub accelerator_type_uri: Option<String>, } impl Part for AcceleratorConfig {} /// The cluster config. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ClusterConfig { /// Optional. The config settings for software inside the cluster. #[serde(rename="softwareConfig")] pub software_config: Option<SoftwareConfig>, /// Optional. A Cloud Storage staging bucket used for sharing generated SSH keys and config. If you do not specify a staging bucket, Cloud Dataproc will determine an appropriate Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Google Compute Engine zone where your cluster is deployed, and then it will create and manage this project-level, per-location bucket for you. #[serde(rename="configBucket")] pub config_bucket: Option<String>, /// Required. The shared Compute Engine config settings for all instances in a cluster. #[serde(rename="gceClusterConfig")] pub gce_cluster_config: Option<GceClusterConfig>, /// Optional. The Compute Engine config settings for worker instances in a cluster. #[serde(rename="workerConfig")] pub worker_config: Option<InstanceGroupConfig>, /// Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): /// ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) /// if [[ "${ROLE}" == 'Master' ]]; then /// ... master specific actions ... /// else /// ... worker specific actions ... /// fi /// #[serde(rename="initializationActions")] pub initialization_actions: Option<Vec<NodeInitializationAction>>, /// Optional. Encryption settings for the cluster. #[serde(rename="encryptionConfig")] pub encryption_config: Option<EncryptionConfig>, /// Optional. The Compute Engine config settings for additional worker instances in a cluster. #[serde(rename="secondaryWorkerConfig")] pub secondary_worker_config: Option<InstanceGroupConfig>, /// Optional. The Compute Engine config settings for the master instance in a cluster. #[serde(rename="masterConfig")] pub master_config: Option<InstanceGroupConfig>, } impl Part for ClusterConfig {} /// A request to submit a job. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [regions jobs submit projects](struct.ProjectRegionJobSubmitCall.html) (request) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct SubmitJobRequest { /// Required. The job resource. pub job: Option<Job>, /// Optional. A unique id used to identify the request. 
If the server receives two SubmitJobRequest requests with the same id, then the second request will be ignored and the first Job created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. #[serde(rename="requestId")] pub request_id: Option<String>, } impl RequestValue for SubmitJobRequest {} /// A list of queries to run on a cluster. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct QueryList { /// Required. The queries to execute. You do not need to terminate a query with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of an Cloud Dataproc API snippet that uses a QueryList to specify a HiveJob: /// "hiveJob": { /// "queryList": { /// "queries": [ /// "query1", /// "query2", /// "query3;query4", /// ] /// } /// } /// pub queries: Option<Vec<String>>, } impl Part for QueryList {} /// Specifies the config of disk options for a group of VM instances. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct DiskConfig { /// Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries. #[serde(rename="numLocalSsds")] pub num_local_ssds: Option<i32>, /// Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). #[serde(rename="bootDiskType")] pub boot_disk_type: Option<String>, /// Optional. Size in GB of the boot disk (default is 500GB). #[serde(rename="bootDiskSizeGb")] pub boot_disk_size_gb: Option<i32>, } impl Part for DiskConfig {} /// A Cloud Dataproc workflow template resource. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). 
/// /// * [locations workflow templates create projects](struct.ProjectLocationWorkflowTemplateCreateCall.html) (request|response) /// * [regions workflow templates create projects](struct.ProjectRegionWorkflowTemplateCreateCall.html) (request|response) /// * [regions workflow templates instantiate inline projects](struct.ProjectRegionWorkflowTemplateInstantiateInlineCall.html) (request) /// * [regions workflow templates update projects](struct.ProjectRegionWorkflowTemplateUpdateCall.html) (request|response) /// * [locations workflow templates instantiate inline projects](struct.ProjectLocationWorkflowTemplateInstantiateInlineCall.html) (request) /// * [locations workflow templates get projects](struct.ProjectLocationWorkflowTemplateGetCall.html) (response) /// * [locations workflow templates update projects](struct.ProjectLocationWorkflowTemplateUpdateCall.html) (request|response) /// * [regions workflow templates get projects](struct.ProjectRegionWorkflowTemplateGetCall.html) (response) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct WorkflowTemplate { /// Output only. The time template was last updated. #[serde(rename="updateTime")] pub update_time: Option<String>, /// Required. The Directed Acyclic Graph of Jobs to submit. pub jobs: Option<Vec<OrderedJob>>, /// Output only. The "resource name" of the template, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region}/workflowTemplates/{template_id} pub name: Option<String>, /// Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated. pub parameters: Option<Vec<TemplateParameter>>, /// Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance.Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).No more than 32 labels can be associated with a template. pub labels: Option<HashMap<String, String>>, /// Required. The template id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. pub id: Option<String>, /// Optional. Used to perform a consistent read-modify-write.This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request. pub version: Option<i32>, /// Required. WorkflowTemplate scheduling information. pub placement: Option<WorkflowTemplatePlacement>, /// Output only. The time template was created. #[serde(rename="createTime")] pub create_time: Option<String>, } impl RequestValue for WorkflowTemplate {} impl ResponseResult for WorkflowTemplate {} /// A Cloud Dataproc job resource. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. 
/// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [regions jobs submit projects](struct.ProjectRegionJobSubmitCall.html) (response) /// * [regions jobs get projects](struct.ProjectRegionJobGetCall.html) (response) /// * [regions jobs patch projects](struct.ProjectRegionJobPatchCall.html) (request|response) /// * [regions jobs cancel projects](struct.ProjectRegionJobCancelCall.html) (response) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct Job { /// Output only. The job status. Additional application-specific status information may be contained in the <code>type_job</code> and <code>yarn_applications</code> fields. pub status: Option<JobStatus>, /// Job is a SparkSql job. #[serde(rename="sparkSqlJob")] pub spark_sql_job: Option<SparkSqlJob>, /// Optional. The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job. pub labels: Option<HashMap<String, String>>, /// Required. Job information, including how, when, and where to run the job. pub placement: Option<JobPlacement>, /// Optional. The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>. pub reference: Option<JobReference>, /// Job is a Hadoop job. #[serde(rename="hadoopJob")] pub hadoop_job: Option<HadoopJob>, /// Job is a Pig job. #[serde(rename="pigJob")] pub pig_job: Option<PigJob>, /// Output only. A URI pointing to the location of the stdout of the job's driver program. #[serde(rename="driverOutputResourceUri")] pub driver_output_resource_uri: Option<String>, /// Output only. If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri. #[serde(rename="driverControlFilesUri")] pub driver_control_files_uri: Option<String>, /// Job is a Spark job. #[serde(rename="sparkJob")] pub spark_job: Option<SparkJob>, /// Output only. The collection of YARN applications spun up by this job.Beta Feature: This report is available for testing purposes only. It may be changed before final release. #[serde(rename="yarnApplications")] pub yarn_applications: Option<Vec<YarnApplication>>, /// Optional. Job scheduling configuration. pub scheduling: Option<JobScheduling>, /// Output only. The previous job status. #[serde(rename="statusHistory")] pub status_history: Option<Vec<JobStatus>>, /// Job is a Pyspark job. #[serde(rename="pysparkJob")] pub pyspark_job: Option<PySparkJob>, /// Job is a Hive job. #[serde(rename="hiveJob")] pub hive_job: Option<HiveJob>, } impl RequestValue for Job {} impl ResponseResult for Job {} /// Specifies workflow execution target.Either managed_cluster or cluster_selector is required. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct WorkflowTemplatePlacement { /// Optional. A selector that chooses target cluster for jobs based on metadata.The selector is evaluated at the time each job is submitted. 
#[serde(rename="clusterSelector")] pub cluster_selector: Option<ClusterSelector>, /// Optional. A cluster that is managed by the workflow. #[serde(rename="managedCluster")] pub managed_cluster: Option<ManagedCluster>, } impl Part for WorkflowTemplatePlacement {} /// Specifies the resources used to actively manage an instance group. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ManagedGroupConfig { /// Output only. The name of the Instance Template used for the Managed Instance Group. #[serde(rename="instanceTemplateName")] pub instance_template_name: Option<String>, /// Output only. The name of the Instance Group Manager for this group. #[serde(rename="instanceGroupManagerName")] pub instance_group_manager_name: Option<String>, } impl Part for ManagedGroupConfig {} /// A job executed by the workflow. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct OrderedJob { /// Job is a Hadoop job. #[serde(rename="hadoopJob")] pub hadoop_job: Option<HadoopJob>, /// Required. The step id. The id must be unique among all jobs within the template.The step id is used as prefix for job id, as job goog-dataproc-workflow-step-id label, and in prerequisiteStepIds field from other steps.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. #[serde(rename="stepId")] pub step_id: Option<String>, /// Job is a SparkSql job. #[serde(rename="sparkSqlJob")] pub spark_sql_job: Option<SparkSqlJob>, /// Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow. #[serde(rename="prerequisiteStepIds")] pub prerequisite_step_ids: Option<Vec<String>>, /// Job is a Pig job. #[serde(rename="pigJob")] pub pig_job: Option<PigJob>, /// Optional. The labels to associate with this job.Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a given job. pub labels: Option<HashMap<String, String>>, /// Job is a Spark job. #[serde(rename="sparkJob")] pub spark_job: Option<SparkJob>, /// Optional. Job scheduling configuration. pub scheduling: Option<JobScheduling>, /// Job is a Pyspark job. #[serde(rename="pysparkJob")] pub pyspark_job: Option<PySparkJob>, /// Job is a Hive job. #[serde(rename="hiveJob")] pub hive_job: Option<HiveJob>, } impl Part for OrderedJob {} /// A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct PySparkJob { /// Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file. #[serde(rename="mainPythonFileUri")] pub main_python_file_uri: Option<String>, /// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks. #[serde(rename="jarFileUris")] pub jar_file_uris: Option<Vec<String>>, /// Optional. The runtime log config for job execution. 
#[serde(rename="loggingConfig")] pub logging_config: Option<LoggingConfig>, /// Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. pub args: Option<Vec<String>>, /// Optional. HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks. #[serde(rename="fileUris")] pub file_uris: Option<Vec<String>>, /// Optional. HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip. #[serde(rename="archiveUris")] pub archive_uris: Option<Vec<String>>, /// Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip. #[serde(rename="pythonFileUris")] pub python_file_uris: Option<Vec<String>>, /// Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. pub properties: Option<HashMap<String, String>>, } impl Part for PySparkJob {} /// Specifies an executable to run on a fully configured node and a timeout period for executable completion. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct NodeInitializationAction { /// Optional. Amount of time executable has to complete. Default is 10 minutes. Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. #[serde(rename="executionTimeout")] pub execution_timeout: Option<String>, /// Required. Cloud Storage URI of executable file. #[serde(rename="executableFile")] pub executable_file: Option<String>, } impl Part for NodeInitializationAction {} /// A response to a request to list workflow templates in a project. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [locations workflow templates list projects](struct.ProjectLocationWorkflowTemplateListCall.html) (response) /// * [regions workflow templates list projects](struct.ProjectRegionWorkflowTemplateListCall.html) (response) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ListWorkflowTemplatesResponse { /// Output only. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>. #[serde(rename="nextPageToken")] pub next_page_token: Option<String>, /// Output only. WorkflowTemplates list. pub templates: Option<Vec<WorkflowTemplate>>, } impl ResponseResult for ListWorkflowTemplatesResponse {} /// Request message for GetIamPolicy method. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). 
/// /// * [regions clusters get iam policy projects](struct.ProjectRegionClusterGetIamPolicyCall.html) (request) /// * [regions operations get iam policy projects](struct.ProjectRegionOperationGetIamPolicyCall.html) (request) /// * [locations workflow templates get iam policy projects](struct.ProjectLocationWorkflowTemplateGetIamPolicyCall.html) (request) /// * [regions workflow templates get iam policy projects](struct.ProjectRegionWorkflowTemplateGetIamPolicyCall.html) (request) /// * [regions jobs get iam policy projects](struct.ProjectRegionJobGetIamPolicyCall.html) (request) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct GetIamPolicyRequest { _never_set: Option<bool> } impl RequestValue for GetIamPolicyRequest {} /// Request message for SetIamPolicy method. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [locations workflow templates set iam policy projects](struct.ProjectLocationWorkflowTemplateSetIamPolicyCall.html) (request) /// * [regions workflow templates set iam policy projects](struct.ProjectRegionWorkflowTemplateSetIamPolicyCall.html) (request) /// * [regions jobs set iam policy projects](struct.ProjectRegionJobSetIamPolicyCall.html) (request) /// * [regions clusters set iam policy projects](struct.ProjectRegionClusterSetIamPolicyCall.html) (request) /// * [regions operations set iam policy projects](struct.ProjectRegionOperationSetIamPolicyCall.html) (request) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct SetIamPolicyRequest { /// REQUIRED: The complete policy to be applied to the resource. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them. pub policy: Option<Policy>, } impl RequestValue for SetIamPolicyRequest {} /// Request message for TestIamPermissions method. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [regions workflow templates test iam permissions projects](struct.ProjectRegionWorkflowTemplateTestIamPermissionCall.html) (request) /// * [locations workflow templates test iam permissions projects](struct.ProjectLocationWorkflowTemplateTestIamPermissionCall.html) (request) /// * [regions operations test iam permissions projects](struct.ProjectRegionOperationTestIamPermissionCall.html) (request) /// * [regions clusters test iam permissions projects](struct.ProjectRegionClusterTestIamPermissionCall.html) (request) /// * [regions jobs test iam permissions projects](struct.ProjectRegionJobTestIamPermissionCall.html) (request) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct TestIamPermissionsRequest { /// The set of permissions to check for the resource. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see IAM Overview (https://cloud.google.com/iam/docs/overview#permissions). pub permissions: Option<Vec<String>>, } impl RequestValue for TestIamPermissionsRequest {} /// A request to instantiate a workflow template. 
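///
/// As an illustrative sketch (the parameter name and values below are placeholders, not taken
/// from the upstream documentation), such a request could be filled in like this before being
/// passed to an *instantiate* call:
///
/// ```ignore
/// use std::collections::HashMap;
///
/// let mut req = InstantiateWorkflowTemplateRequest::default();
/// req.request_id = Some("run-0001".to_string());
///
/// let mut params = HashMap::new();
/// params.insert("ZONE".to_string(), "us-central1-a".to_string());
/// req.parameters = Some(params);
/// ```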
/// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [regions workflow templates instantiate projects](struct.ProjectRegionWorkflowTemplateInstantiateCall.html) (request) /// * [locations workflow templates instantiate projects](struct.ProjectLocationWorkflowTemplateInstantiateCall.html) (request) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct InstantiateWorkflowTemplateRequest { /// Optional. The version of workflow template to instantiate. If specified, the workflow will be instantiated only if the current version of the workflow template has the supplied version.This option cannot be used to instantiate a previous version of workflow template. pub version: Option<i32>, /// Optional. A tag that prevents multiple concurrent workflow instances with the same tag from running. This mitigates risk of concurrent instances started due to retries.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. #[serde(rename="requestId")] pub request_id: Option<String>, /// Optional. Map from parameter names to values that should be used for those parameters. Values may not exceed 100 characters. pub parameters: Option<HashMap<String, String>>, } impl RequestValue for InstantiateWorkflowTemplateRequest {} /// Represents an expression text. Example: /// title: "User account presence" /// description: "Determines whether the request has a user account" /// expression: "size(request.user) > 0" /// /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct Expr { /// An optional title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. pub title: Option<String>, /// Textual representation of an expression in Common Expression Language syntax.The application context of the containing message determines which well-known feature set of CEL is supported. pub expression: Option<String>, /// An optional string indicating the location of the expression for error reporting, e.g. a file name and a position in the file. pub location: Option<String>, /// An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. pub description: Option<String>, } impl Part for Expr {} /// A request to collect cluster diagnostic information. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [regions clusters diagnose projects](struct.ProjectRegionClusterDiagnoseCall.html) (request) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct DiagnoseClusterRequest { _never_set: Option<bool> } impl RequestValue for DiagnoseClusterRequest {} /// The list of all clusters in a project. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. 
/// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [regions clusters list projects](struct.ProjectRegionClusterListCall.html) (response) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ListClustersResponse { /// Output only. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListClustersRequest. #[serde(rename="nextPageToken")] pub next_page_token: Option<String>, /// Output only. The clusters in the project. pub clusters: Option<Vec<Cluster>>, } impl ResponseResult for ListClustersResponse {} /// A list of jobs in a project. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [regions jobs list projects](struct.ProjectRegionJobListCall.html) (response) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ListJobsResponse { /// Optional. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent <code>ListJobsRequest</code>. #[serde(rename="nextPageToken")] pub next_page_token: Option<String>, /// Output only. Jobs list. pub jobs: Option<Vec<Job>>, } impl ResponseResult for ListJobsResponse {} /// Cloud Dataproc job status. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct JobStatus { /// Output only. A state message specifying the overall job state. pub state: Option<String>, /// Output only. The time when this state was entered. #[serde(rename="stateStartTime")] pub state_start_time: Option<String>, /// Output only. Additional state information, which includes status reported by the agent. pub substate: Option<String>, /// Output only. Optional job state details, such as an error description if the state is <code>ERROR</code>. pub details: Option<String>, } impl Part for JobStatus {} /// Cloud Dataproc job config. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct JobPlacement { /// Required. The name of the cluster where the job will be submitted. #[serde(rename="clusterName")] pub cluster_name: Option<String>, /// Output only. A cluster UUID generated by the Cloud Dataproc service when the job is submitted. #[serde(rename="clusterUuid")] pub cluster_uuid: Option<String>, } impl Part for JobPlacement {} /// The status of a cluster and its instances. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ClusterStatus { /// Output only. The cluster's state. pub state: Option<String>, /// Output only. Time when this state was entered. #[serde(rename="stateStartTime")] pub state_start_time: Option<String>, /// Output only. Additional state information that includes status reported by the agent. pub substate: Option<String>, /// Output only. Optional details of cluster's state. pub detail: Option<String>, } impl Part for ClusterStatus {} /// The runtime logging config of the job. 
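///
/// For illustration, one plausible way to build this config; the package names and log levels
/// are placeholders taken from the examples in the field description below:
///
/// ```ignore
/// use std::collections::HashMap;
///
/// // Per-package log levels for the driver; "root" configures the root logger.
/// let mut levels = HashMap::new();
/// levels.insert("root".to_string(), "INFO".to_string());
/// levels.insert("org.apache".to_string(), "DEBUG".to_string());
///
/// let logging = LoggingConfig { driver_log_levels: Some(levels) };
/// ```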
/// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct LoggingConfig { /// The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' #[serde(rename="driverLogLevels")] pub driver_log_levels: Option<HashMap<String, String>>, } impl Part for LoggingConfig {} /// Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ClusterMetrics { /// The YARN metrics. #[serde(rename="yarnMetrics")] pub yarn_metrics: Option<HashMap<String, String>>, /// The HDFS metrics. #[serde(rename="hdfsMetrics")] pub hdfs_metrics: Option<HashMap<String, String>>, } impl Part for ClusterMetrics {} /// A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct HiveJob { /// The HCFS URI of the script that contains Hive queries. #[serde(rename="queryFileUri")] pub query_file_uri: Option<String>, /// Optional. Mapping of query variable names to values (equivalent to the Hive command: SET name="value";). #[serde(rename="scriptVariables")] pub script_variables: Option<HashMap<String, String>>, /// Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. #[serde(rename="continueOnFailure")] pub continue_on_failure: Option<bool>, /// Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. #[serde(rename="jarFileUris")] pub jar_file_uris: Option<Vec<String>>, /// A list of queries. #[serde(rename="queryList")] pub query_list: Option<QueryList>, /// Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code. pub properties: Option<HashMap<String, String>>, } impl Part for HiveJob {} /// Validation based on regular expressions. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct RegexValidation { /// Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient). pub regexes: Option<Vec<String>>, } impl Part for RegexValidation {} /// A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct HadoopJob { /// Optional. 
Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks. #[serde(rename="jarFileUris")] pub jar_file_uris: Option<Vec<String>>, /// Optional. The runtime log config for job execution. #[serde(rename="loggingConfig")] pub logging_config: Option<LoggingConfig>, /// Optional. The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission. pub args: Option<Vec<String>>, /// Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks. #[serde(rename="fileUris")] pub file_uris: Option<Vec<String>>, /// The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris. #[serde(rename="mainClass")] pub main_class: Option<String>, /// Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip. #[serde(rename="archiveUris")] pub archive_uris: Option<Vec<String>>, /// The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' #[serde(rename="mainJarFileUri")] pub main_jar_file_uri: Option<String>, /// Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code. pub properties: Option<HashMap<String, String>>, } impl Part for HadoopJob {} /// A selector that chooses target cluster for jobs based on metadata. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ClusterSelector { /// Required. The cluster labels. Cluster must have all labels to match. #[serde(rename="clusterLabels")] pub cluster_labels: Option<HashMap<String, String>>, /// Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster.If unspecified, the zone of the first cluster matching the selector is used. pub zone: Option<String>, } impl Part for ClusterSelector {} /// Specifies the selection and config of software inside the cluster. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct SoftwareConfig { /// Optional. The version of software inside the cluster. It must be one of the supported Cloud Dataproc Versions, such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version. If unspecified, it defaults to the latest version. #[serde(rename="imageVersion")] pub image_version: Option<String>, /// Optional. The properties to set on daemon config files.Property keys are specified in prefix:property format, such as core:fs.defaultFS. 
The following are supported prefixes and their mappings: /// capacity-scheduler: capacity-scheduler.xml /// core: core-site.xml /// distcp: distcp-default.xml /// hdfs: hdfs-site.xml /// hive: hive-site.xml /// mapred: mapred-site.xml /// pig: pig.properties /// spark: spark-defaults.conf /// yarn: yarn-site.xmlFor more information, see Cluster properties. pub properties: Option<HashMap<String, String>>, } impl Part for SoftwareConfig {} /// The response message for Operations.ListOperations. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [regions operations list projects](struct.ProjectRegionOperationListCall.html) (response) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ListOperationsResponse { /// The standard List next-page token. #[serde(rename="nextPageToken")] pub next_page_token: Option<String>, /// A list of operations that matches the specified filter in the request. pub operations: Option<Vec<Operation>>, } impl ResponseResult for ListOperationsResponse {} /// A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) queries on YARN. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct PigJob { /// The HCFS URI of the script that contains the Pig queries. #[serde(rename="queryFileUri")] pub query_file_uri: Option<String>, /// Optional. Mapping of query variable names to values (equivalent to the Pig command: name=[value]). #[serde(rename="scriptVariables")] pub script_variables: Option<HashMap<String, String>>, /// Optional. The runtime log config for job execution. #[serde(rename="loggingConfig")] pub logging_config: Option<LoggingConfig>, /// Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. #[serde(rename="continueOnFailure")] pub continue_on_failure: Option<bool>, /// Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. #[serde(rename="jarFileUris")] pub jar_file_uris: Option<Vec<String>>, /// A list of queries. #[serde(rename="queryList")] pub query_list: Option<QueryList>, /// Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code. pub properties: Option<HashMap<String, String>>, } impl Part for PigJob {} /// Associates members with a role. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct Binding { /// Role that is assigned to members. For example, roles/viewer, roles/editor, or roles/owner. pub role: Option<String>, /// Unimplemented. The condition that is associated with this binding. NOTE: an unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently. pub condition: Option<Expr>, /// Specifies the identities requesting access for a Cloud Platform resource. 
members can have the following values: /// allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. /// allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. /// user:{emailid}: An email address that represents a specific Google account. For example, [email protected] . /// serviceAccount:{emailid}: An email address that represents a service account. For example, [email protected]. /// group:{emailid}: An email address that represents a Google group. For example, [email protected]. /// domain:{domain}: A Google Apps domain name that represents all the users of that domain. For example, google.com or example.com. pub members: Option<Vec<String>>, } impl Part for Binding {} /// Cluster that is managed by the workflow. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ManagedCluster { /// Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix.The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters. #[serde(rename="clusterName")] pub cluster_name: Option<String>, /// Optional. The labels to associate with this cluster.Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a given cluster. pub labels: Option<HashMap<String, String>>, /// Required. The cluster configuration. pub config: Option<ClusterConfig>, } impl Part for ManagedCluster {} /// Describes the identifying information, config, and status of a cluster of Compute Engine instances. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [regions clusters patch projects](struct.ProjectRegionClusterPatchCall.html) (request) /// * [regions clusters create projects](struct.ProjectRegionClusterCreateCall.html) (request) /// * [regions clusters get projects](struct.ProjectRegionClusterGetCall.html) (response) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct Cluster { /// Output only. Cluster status. pub status: Option<ClusterStatus>, /// Output only. A cluster UUID (Unique Universal Identifier). Cloud Dataproc generates this value when it creates the cluster. #[serde(rename="clusterUuid")] pub cluster_uuid: Option<String>, /// Required. The cluster name. Cluster names within a project must be unique. Names of deleted clusters can be reused. #[serde(rename="clusterName")] pub cluster_name: Option<String>, /// Required. The Google Cloud Platform project ID that the cluster belongs to. #[serde(rename="projectId")] pub project_id: Option<String>, /// Optional. The labels to associate with this cluster. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). 
Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster. pub labels: Option<HashMap<String, String>>, /// Contains cluster daemon metrics such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release. pub metrics: Option<ClusterMetrics>, /// Output only. The previous cluster status. #[serde(rename="statusHistory")] pub status_history: Option<Vec<ClusterStatus>>, /// Required. The cluster config. Note that Cloud Dataproc may set default values, and values may change when clusters are updated. pub config: Option<ClusterConfig>, } impl RequestValue for Cluster {} impl ResponseResult for Cluster {} /// Configuration for parameter validation. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ParameterValidation { /// Validation based on regular expressions. pub regex: Option<RegexValidation>, /// Validation based on a list of allowed values. pub values: Option<ValueValidation>, } impl Part for ParameterValidation {} /// A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/) applications on YARN. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct SparkJob { /// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks. #[serde(rename="jarFileUris")] pub jar_file_uris: Option<Vec<String>>, /// Optional. The runtime log config for job execution. #[serde(rename="loggingConfig")] pub logging_config: Option<LoggingConfig>, /// Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. pub args: Option<Vec<String>>, /// Optional. HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks. #[serde(rename="fileUris")] pub file_uris: Option<Vec<String>>, /// The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris. #[serde(rename="mainClass")] pub main_class: Option<String>, /// Optional. HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. #[serde(rename="archiveUris")] pub archive_uris: Option<Vec<String>>, /// The HCFS URI of the jar file that contains the main class. #[serde(rename="mainJarFileUri")] pub main_jar_file_uri: Option<String>, /// Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. pub properties: Option<HashMap<String, String>>, } impl Part for SparkJob {} /// Encryption settings for the cluster. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct EncryptionConfig { /// Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster. 
#[serde(rename="gcePdKmsKeyName")] pub gce_pd_kms_key_name: Option<String>, } impl Part for EncryptionConfig {} /// This resource represents a long-running operation that is the result of a network API call. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [regions workflow templates instantiate projects](struct.ProjectRegionWorkflowTemplateInstantiateCall.html) (response) /// * [regions clusters patch projects](struct.ProjectRegionClusterPatchCall.html) (response) /// * [regions operations get projects](struct.ProjectRegionOperationGetCall.html) (response) /// * [regions clusters delete projects](struct.ProjectRegionClusterDeleteCall.html) (response) /// * [regions clusters diagnose projects](struct.ProjectRegionClusterDiagnoseCall.html) (response) /// * [regions clusters create projects](struct.ProjectRegionClusterCreateCall.html) (response) /// * [locations workflow templates instantiate inline projects](struct.ProjectLocationWorkflowTemplateInstantiateInlineCall.html) (response) /// * [regions workflow templates instantiate inline projects](struct.ProjectRegionWorkflowTemplateInstantiateInlineCall.html) (response) /// * [locations workflow templates instantiate projects](struct.ProjectLocationWorkflowTemplateInstantiateCall.html) (response) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct Operation { /// The error result of the operation in case of failure or cancellation. pub error: Option<Status>, /// If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available. pub done: Option<bool>, /// The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse. pub response: Option<HashMap<String, String>>, /// The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should have the format of operations/some/unique/name. pub name: Option<String>, /// Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. pub metadata: Option<HashMap<String, String>>, } impl ResponseResult for Operation {} /// Validation based on a list of allowed values. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ValueValidation { /// Required. List of allowed values for the parameter. pub values: Option<Vec<String>>, } impl Part for ValueValidation {} /// A request to cancel a job. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. 
/// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [regions jobs cancel projects](struct.ProjectRegionJobCancelCall.html) (request) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct CancelJobRequest { _never_set: Option<bool> } impl RequestValue for CancelJobRequest {} // ################### // MethodBuilders ### // ################# /// A builder providing access to all methods supported on *project* resources. /// It is not used directly, but through the `Dataproc` hub. /// /// # Example /// /// Instantiate a resource builder /// /// ```test_harness,no_run /// extern crate hyper; /// extern crate hyper_rustls; /// extern crate yup_oauth2 as oauth2; /// extern crate google_dataproc1 as dataproc1; /// /// # #[test] fn egal() { /// use std::default::Default; /// use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// use dataproc1::Dataproc; /// /// let secret: ApplicationSecret = Default::default(); /// let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// <MemoryStorage as Default>::default(), None); /// let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // Usually you wouldn't bind this to a variable, but keep calling *CallBuilders* /// // like `locations_workflow_templates_create(...)`, `locations_workflow_templates_delete(...)`, `locations_workflow_templates_get(...)`, `locations_workflow_templates_get_iam_policy(...)`, `locations_workflow_templates_instantiate(...)`, `locations_workflow_templates_instantiate_inline(...)`, `locations_workflow_templates_list(...)`, `locations_workflow_templates_set_iam_policy(...)`, `locations_workflow_templates_test_iam_permissions(...)`, `locations_workflow_templates_update(...)`, `regions_clusters_create(...)`, `regions_clusters_delete(...)`, `regions_clusters_diagnose(...)`, `regions_clusters_get(...)`, `regions_clusters_get_iam_policy(...)`, `regions_clusters_list(...)`, `regions_clusters_patch(...)`, `regions_clusters_set_iam_policy(...)`, `regions_clusters_test_iam_permissions(...)`, `regions_jobs_cancel(...)`, `regions_jobs_delete(...)`, `regions_jobs_get(...)`, `regions_jobs_get_iam_policy(...)`, `regions_jobs_list(...)`, `regions_jobs_patch(...)`, `regions_jobs_set_iam_policy(...)`, `regions_jobs_submit(...)`, `regions_jobs_test_iam_permissions(...)`, `regions_operations_cancel(...)`, `regions_operations_delete(...)`, `regions_operations_get(...)`, `regions_operations_get_iam_policy(...)`, `regions_operations_list(...)`, `regions_operations_set_iam_policy(...)`, `regions_operations_test_iam_permissions(...)`, `regions_workflow_templates_create(...)`, `regions_workflow_templates_delete(...)`, `regions_workflow_templates_get(...)`, `regions_workflow_templates_get_iam_policy(...)`, `regions_workflow_templates_instantiate(...)`, `regions_workflow_templates_instantiate_inline(...)`, `regions_workflow_templates_list(...)`, `regions_workflow_templates_set_iam_policy(...)`, `regions_workflow_templates_test_iam_permissions(...)` and `regions_workflow_templates_update(...)` /// // to build up your call. 
/// let rb = hub.projects(); /// # } /// ``` pub struct ProjectMethods<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, } impl<'a, C, A> MethodsBuilder for ProjectMethods<'a, C, A> {} impl<'a, C, A> ProjectMethods<'a, C, A> { /// Create a builder to help you perform the following task: /// /// Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning. /// /// # Arguments /// /// * `request` - No description provided. /// * `resource` - REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. pub fn regions_workflow_templates_test_iam_permissions(&self, request: TestIamPermissionsRequest, resource: &str) -> ProjectRegionWorkflowTemplateTestIamPermissionCall<'a, C, A> { ProjectRegionWorkflowTemplateTestIamPermissionCall { hub: self.hub, _request: request, _resource: resource.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Gets the resource representation for a cluster in a project. /// /// # Arguments /// /// * `projectId` - Required. The ID of the Google Cloud Platform project that the cluster belongs to. /// * `region` - Required. The Cloud Dataproc region in which to handle the request. /// * `clusterName` - Required. The cluster name. pub fn regions_clusters_get(&self, project_id: &str, region: &str, cluster_name: &str) -> ProjectRegionClusterGetCall<'a, C, A> { ProjectRegionClusterGetCall { hub: self.hub, _project_id: project_id.to_string(), _region: region.to_string(), _cluster_name: cluster_name.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning. /// /// # Arguments /// /// * `request` - No description provided. /// * `resource` - REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. pub fn regions_operations_test_iam_permissions(&self, request: TestIamPermissionsRequest, resource: &str) -> ProjectRegionOperationTestIamPermissionCall<'a, C, A> { ProjectRegionOperationTestIamPermissionCall { hub: self.hub, _request: request, _resource: resource.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Lists workflows that match the specified filter in the request. /// /// # Arguments /// /// * `parent` - Required. 
The "resource name" of the region, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region} pub fn locations_workflow_templates_list(&self, parent: &str) -> ProjectLocationWorkflowTemplateListCall<'a, C, A> { ProjectLocationWorkflowTemplateListCall { hub: self.hub, _parent: parent.to_string(), _page_token: Default::default(), _page_size: Default::default(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service. /// /// # Arguments /// /// * `name` - The name of the operation resource. pub fn regions_operations_get(&self, name: &str) -> ProjectRegionOperationGetCall<'a, C, A> { ProjectRegionOperationGetCall { hub: self.hub, _name: name.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Lists workflows that match the specified filter in the request. /// /// # Arguments /// /// * `parent` - Required. The "resource name" of the region, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region} pub fn regions_workflow_templates_list(&self, parent: &str) -> ProjectRegionWorkflowTemplateListCall<'a, C, A> { ProjectRegionWorkflowTemplateListCall { hub: self.hub, _parent: parent.to_string(), _page_token: Default::default(), _page_size: Default::default(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Sets the access control policy on the specified resource. Replaces any existing policy. /// /// # Arguments /// /// * `request` - No description provided. /// * `resource` - REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. pub fn regions_operations_set_iam_policy(&self, request: SetIamPolicyRequest, resource: &str) -> ProjectRegionOperationSetIamPolicyCall<'a, C, A> { ProjectRegionOperationSetIamPolicyCall { hub: self.hub, _request: request, _resource: resource.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Retrieves the latest workflow template.Can retrieve previously instantiated template by specifying optional version parameter. /// /// # Arguments /// /// * `name` - Required. The "resource name" of the workflow template, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region}/workflowTemplates/{template_id} pub fn locations_workflow_templates_get(&self, name: &str) -> ProjectLocationWorkflowTemplateGetCall<'a, C, A> { ProjectLocationWorkflowTemplateGetCall { hub: self.hub, _name: name.to_string(), _version: Default::default(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Updates a job in a project. /// /// # Arguments /// /// * `request` - No description provided. /// * `projectId` - Required. 
The ID of the Google Cloud Platform project that the job belongs to. /// * `region` - Required. The Cloud Dataproc region in which to handle the request. /// * `jobId` - Required. The job ID. pub fn regions_jobs_patch(&self, request: Job, project_id: &str, region: &str, job_id: &str) -> ProjectRegionJobPatchCall<'a, C, A> { ProjectRegionJobPatchCall { hub: self.hub, _request: request, _project_id: project_id.to_string(), _region: region.to_string(), _job_id: job_id.to_string(), _update_mask: Default::default(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning. /// /// # Arguments /// /// * `request` - No description provided. /// * `resource` - REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. pub fn regions_jobs_test_iam_permissions(&self, request: TestIamPermissionsRequest, resource: &str) -> ProjectRegionJobTestIamPermissionCall<'a, C, A> { ProjectRegionJobTestIamPermissionCall { hub: self.hub, _request: request, _resource: resource.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Instantiates a template and begins execution.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata.On successful completion, Operation.response will be Empty. /// /// # Arguments /// /// * `request` - No description provided. /// * `name` - Required. The "resource name" of the workflow template, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region}/workflowTemplates/{template_id} pub fn regions_workflow_templates_instantiate(&self, request: InstantiateWorkflowTemplateRequest, name: &str) -> ProjectRegionWorkflowTemplateInstantiateCall<'a, C, A> { ProjectRegionWorkflowTemplateInstantiateCall { hub: self.hub, _request: request, _name: name.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning. /// /// # Arguments /// /// * `request` - No description provided. /// * `resource` - REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. 
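    ///
    /// As a rough sketch, assuming a `hub` built as in the resource-builder example above and
    /// the usual generated `doit()` finisher; the permission string and resource name are
    /// placeholders:
    ///
    /// ```ignore
    /// let req = TestIamPermissionsRequest {
    ///     permissions: Some(vec!["dataproc.workflowTemplates.get".to_string()]),
    /// };
    /// let result = hub.projects()
    ///     .locations_workflow_templates_test_iam_permissions(
    ///         req,
    ///         "projects/my-project/locations/us-central1/workflowTemplates/my-template")
    ///     .doit();
    /// ```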
pub fn locations_workflow_templates_test_iam_permissions(&self, request: TestIamPermissionsRequest, resource: &str) -> ProjectLocationWorkflowTemplateTestIamPermissionCall<'a, C, A> { ProjectLocationWorkflowTemplateTestIamPermissionCall { hub: self.hub, _request: request, _resource: resource.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Sets the access control policy on the specified resource. Replaces any existing policy. /// /// # Arguments /// /// * `request` - No description provided. /// * `resource` - REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. pub fn regions_clusters_set_iam_policy(&self, request: SetIamPolicyRequest, resource: &str) -> ProjectRegionClusterSetIamPolicyCall<'a, C, A> { ProjectRegionClusterSetIamPolicyCall { hub: self.hub, _request: request, _resource: resource.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Creates new workflow template. /// /// # Arguments /// /// * `request` - No description provided. /// * `parent` - Required. The "resource name" of the region, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region} pub fn locations_workflow_templates_create(&self, request: WorkflowTemplate, parent: &str) -> ProjectLocationWorkflowTemplateCreateCall<'a, C, A> { ProjectLocationWorkflowTemplateCreateCall { hub: self.hub, _request: request, _parent: parent.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning. /// /// # Arguments /// /// * `request` - No description provided. /// * `resource` - REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. pub fn regions_clusters_test_iam_permissions(&self, request: TestIamPermissionsRequest, resource: &str) -> ProjectRegionClusterTestIamPermissionCall<'a, C, A> { ProjectRegionClusterTestIamPermissionCall { hub: self.hub, _request: request, _resource: resource.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Updates a cluster in a project. /// /// # Arguments /// /// * `request` - No description provided. /// * `projectId` - Required. The ID of the Google Cloud Platform project the cluster belongs to. /// * `region` - Required. The Cloud Dataproc region in which to handle the request. /// * `clusterName` - Required. The cluster name. 
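    ///
    /// A hypothetical sketch of updating only a cluster's labels, assuming a `hub` as in the
    /// resource-builder example above and the usual generated setter/`doit()` pattern; the
    /// `update_mask` value follows the API's field-mask convention and the names are placeholders:
    ///
    /// ```ignore
    /// use std::collections::HashMap;
    ///
    /// let mut labels = HashMap::new();
    /// labels.insert("env".to_string(), "staging".to_string());
    ///
    /// // Only the fields named in the update mask are changed.
    /// let cluster = Cluster { labels: Some(labels), ..Default::default() };
    /// let result = hub.projects()
    ///     .regions_clusters_patch(cluster, "my-project", "us-central1", "my-cluster")
    ///     .update_mask("labels")
    ///     .doit();
    /// ```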
pub fn regions_clusters_patch(&self, request: Cluster, project_id: &str, region: &str, cluster_name: &str) -> ProjectRegionClusterPatchCall<'a, C, A> { ProjectRegionClusterPatchCall { hub: self.hub, _request: request, _project_id: project_id.to_string(), _region: region.to_string(), _cluster_name: cluster_name.to_string(), _update_mask: Default::default(), _request_id: Default::default(), _graceful_decommission_timeout: Default::default(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. /// /// # Arguments /// /// * `name` - The name of the operation resource to be deleted. pub fn regions_operations_delete(&self, name: &str) -> ProjectRegionOperationDeleteCall<'a, C, A> { ProjectRegionOperationDeleteCall { hub: self.hub, _name: name.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Submits a job to a cluster. /// /// # Arguments /// /// * `request` - No description provided. /// * `projectId` - Required. The ID of the Google Cloud Platform project that the job belongs to. /// * `region` - Required. The Cloud Dataproc region in which to handle the request. pub fn regions_jobs_submit(&self, request: SubmitJobRequest, project_id: &str, region: &str) -> ProjectRegionJobSubmitCall<'a, C, A> { ProjectRegionJobSubmitCall { hub: self.hub, _request: request, _project_id: project_id.to_string(), _region: region.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Creates a cluster in a project. /// /// # Arguments /// /// * `request` - No description provided. /// * `projectId` - Required. The ID of the Google Cloud Platform project that the cluster belongs to. /// * `region` - Required. The Cloud Dataproc region in which to handle the request. pub fn regions_clusters_create(&self, request: Cluster, project_id: &str, region: &str) -> ProjectRegionClusterCreateCall<'a, C, A> { ProjectRegionClusterCreateCall { hub: self.hub, _request: request, _project_id: project_id.to_string(), _region: region.to_string(), _request_id: Default::default(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Sets the access control policy on the specified resource. Replaces any existing policy. /// /// # Arguments /// /// * `request` - No description provided. /// * `resource` - REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. 
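    ///
    /// A rough sketch of the usual read-modify-write flow, assuming a `hub` as above and the
    /// generated `doit()` finisher; the resource name is a placeholder and error handling is
    /// omitted:
    ///
    /// ```ignore
    /// let resource = "projects/my-project/locations/us-central1/workflowTemplates/my-template";
    /// // Read the current policy first...
    /// let (_, policy) = hub.projects()
    ///     .locations_workflow_templates_get_iam_policy(GetIamPolicyRequest::default(), resource)
    ///     .doit().unwrap();
    /// // ...adjust the returned policy (e.g. its bindings) as needed, then write it back.
    /// let req = SetIamPolicyRequest { policy: Some(policy) };
    /// let result = hub.projects()
    ///     .locations_workflow_templates_set_iam_policy(req, resource)
    ///     .doit();
    /// ```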
pub fn locations_workflow_templates_set_iam_policy(&self, request: SetIamPolicyRequest, resource: &str) -> ProjectLocationWorkflowTemplateSetIamPolicyCall<'a, C, A> { ProjectLocationWorkflowTemplateSetIamPolicyCall { hub: self.hub, _request: request, _resource: resource.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Instantiates a template and begins execution.This method is equivalent to executing the sequence CreateWorkflowTemplate, InstantiateWorkflowTemplate, DeleteWorkflowTemplate.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata.On successful completion, Operation.response will be Empty. /// /// # Arguments /// /// * `request` - No description provided. /// * `parent` - Required. The "resource name" of the workflow template region, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region} pub fn locations_workflow_templates_instantiate_inline(&self, request: WorkflowTemplate, parent: &str) -> ProjectLocationWorkflowTemplateInstantiateInlineCall<'a, C, A> { ProjectLocationWorkflowTemplateInstantiateInlineCall { hub: self.hub, _request: request, _parent: parent.to_string(), _request_id: Default::default(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Updates (replaces) workflow template. The updated template must contain version that matches the current server version. /// /// # Arguments /// /// * `request` - No description provided. /// * `name` - Output only. The "resource name" of the template, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region}/workflowTemplates/{template_id} pub fn locations_workflow_templates_update(&self, request: WorkflowTemplate, name: &str) -> ProjectLocationWorkflowTemplateUpdateCall<'a, C, A> { ProjectLocationWorkflowTemplateUpdateCall { hub: self.hub, _request: request, _name: name.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Instantiates a template and begins execution.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata.On successful completion, Operation.response will be Empty. /// /// # Arguments /// /// * `request` - No description provided. /// * `name` - Required. 
The "resource name" of the workflow template, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region}/workflowTemplates/{template_id} pub fn locations_workflow_templates_instantiate(&self, request: InstantiateWorkflowTemplateRequest, name: &str) -> ProjectLocationWorkflowTemplateInstantiateCall<'a, C, A> { ProjectLocationWorkflowTemplateInstantiateCall { hub: self.hub, _request: request, _name: name.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set. /// /// # Arguments /// /// * `request` - No description provided. /// * `resource` - REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. pub fn regions_clusters_get_iam_policy(&self, request: GetIamPolicyRequest, resource: &str) -> ProjectRegionClusterGetIamPolicyCall<'a, C, A> { ProjectRegionClusterGetIamPolicyCall { hub: self.hub, _request: request, _resource: resource.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Lists all regions/{region}/clusters in a project. /// /// # Arguments /// /// * `projectId` - Required. The ID of the Google Cloud Platform project that the cluster belongs to. /// * `region` - Required. The Cloud Dataproc region in which to handle the request. pub fn regions_clusters_list(&self, project_id: &str, region: &str) -> ProjectRegionClusterListCall<'a, C, A> { ProjectRegionClusterListCall { hub: self.hub, _project_id: project_id.to_string(), _region: region.to_string(), _page_token: Default::default(), _page_size: Default::default(), _filter: Default::default(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Lists regions/{region}/jobs in a project. /// /// # Arguments /// /// * `projectId` - Required. The ID of the Google Cloud Platform project that the job belongs to. /// * `region` - Required. The Cloud Dataproc region in which to handle the request. pub fn regions_jobs_list(&self, project_id: &str, region: &str) -> ProjectRegionJobListCall<'a, C, A> { ProjectRegionJobListCall { hub: self.hub, _project_id: project_id.to_string(), _region: region.to_string(), _page_token: Default::default(), _page_size: Default::default(), _job_state_matcher: Default::default(), _filter: Default::default(), _cluster_name: Default::default(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set. /// /// # Arguments /// /// * `request` - No description provided. /// * `resource` - REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. 
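    ///
    /// For illustration, with the same assumptions as the other sketches above; the resource
    /// name is a placeholder:
    ///
    /// ```ignore
    /// let result = hub.projects()
    ///     .regions_jobs_get_iam_policy(
    ///         GetIamPolicyRequest::default(),
    ///         "projects/my-project/regions/us-central1/jobs/my-job-id")
    ///     .doit();
    /// ```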
pub fn regions_jobs_get_iam_policy(&self, request: GetIamPolicyRequest, resource: &str) -> ProjectRegionJobGetIamPolicyCall<'a, C, A> { ProjectRegionJobGetIamPolicyCall { hub: self.hub, _request: request, _resource: resource.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED. /// /// # Arguments /// /// * `name` - The name of the operation resource to be cancelled. pub fn regions_operations_cancel(&self, name: &str) -> ProjectRegionOperationCancelCall<'a, C, A> { ProjectRegionOperationCancelCall { hub: self.hub, _name: name.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Gets cluster diagnostic information. After the operation completes, the Operation.response field contains DiagnoseClusterOutputLocation. /// /// # Arguments /// /// * `request` - No description provided. /// * `projectId` - Required. The ID of the Google Cloud Platform project that the cluster belongs to. /// * `region` - Required. The Cloud Dataproc region in which to handle the request. /// * `clusterName` - Required. The cluster name. pub fn regions_clusters_diagnose(&self, request: DiagnoseClusterRequest, project_id: &str, region: &str, cluster_name: &str) -> ProjectRegionClusterDiagnoseCall<'a, C, A> { ProjectRegionClusterDiagnoseCall { hub: self.hub, _request: request, _project_id: project_id.to_string(), _region: region.to_string(), _cluster_name: cluster_name.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Updates (replaces) workflow template. The updated template must contain version that matches the current server version. /// /// # Arguments /// /// * `request` - No description provided. /// * `name` - Output only. The "resource name" of the template, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region}/workflowTemplates/{template_id} pub fn regions_workflow_templates_update(&self, request: WorkflowTemplate, name: &str) -> ProjectRegionWorkflowTemplateUpdateCall<'a, C, A> { ProjectRegionWorkflowTemplateUpdateCall { hub: self.hub, _request: request, _name: name.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. 
To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id. /// /// # Arguments /// /// * `name` - The name of the operation's parent resource. pub fn regions_operations_list(&self, name: &str) -> ProjectRegionOperationListCall<'a, C, A> { ProjectRegionOperationListCall { hub: self.hub, _name: name.to_string(), _page_token: Default::default(), _page_size: Default::default(), _filter: Default::default(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Deletes a workflow template. It does not cancel in-progress workflows. /// /// # Arguments /// /// * `name` - Required. The "resource name" of the workflow template, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region}/workflowTemplates/{template_id} pub fn locations_workflow_templates_delete(&self, name: &str) -> ProjectLocationWorkflowTemplateDeleteCall<'a, C, A> { ProjectLocationWorkflowTemplateDeleteCall { hub: self.hub, _name: name.to_string(), _version: Default::default(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Sets the access control policy on the specified resource. Replaces any existing policy. /// /// # Arguments /// /// * `request` - No description provided. /// * `resource` - REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. pub fn regions_workflow_templates_set_iam_policy(&self, request: SetIamPolicyRequest, resource: &str) -> ProjectRegionWorkflowTemplateSetIamPolicyCall<'a, C, A> { ProjectRegionWorkflowTemplateSetIamPolicyCall { hub: self.hub, _request: request, _resource: resource.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set. /// /// # Arguments /// /// * `request` - No description provided. /// * `resource` - REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. pub fn regions_workflow_templates_get_iam_policy(&self, request: GetIamPolicyRequest, resource: &str) -> ProjectRegionWorkflowTemplateGetIamPolicyCall<'a, C, A> { ProjectRegionWorkflowTemplateGetIamPolicyCall { hub: self.hub, _request: request, _resource: resource.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Gets the resource representation for a job in a project. /// /// # Arguments /// /// * `projectId` - Required. The ID of the Google Cloud Platform project that the job belongs to. /// * `region` - Required. The Cloud Dataproc region in which to handle the request. /// * `jobId` - Required. The job ID. 
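    ///
    /// A minimal sketch, assuming a `hub` built as in the resource-builder example above and
    /// the usual generated `doit()` finisher; the identifiers are placeholders:
    ///
    /// ```ignore
    /// let result = hub.projects()
    ///     .regions_jobs_get("my-project", "us-central1", "my-job-id")
    ///     .doit();
    /// ```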
pub fn regions_jobs_get(&self, project_id: &str, region: &str, job_id: &str) -> ProjectRegionJobGetCall<'a, C, A> { ProjectRegionJobGetCall { hub: self.hub, _project_id: project_id.to_string(), _region: region.to_string(), _job_id: job_id.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set. /// /// # Arguments /// /// * `request` - No description provided. /// * `resource` - REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. pub fn locations_workflow_templates_get_iam_policy(&self, request: GetIamPolicyRequest, resource: &str) -> ProjectLocationWorkflowTemplateGetIamPolicyCall<'a, C, A> { ProjectLocationWorkflowTemplateGetIamPolicyCall { hub: self.hub, _request: request, _resource: resource.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Deletes a cluster in a project. /// /// # Arguments /// /// * `projectId` - Required. The ID of the Google Cloud Platform project that the cluster belongs to. /// * `region` - Required. The Cloud Dataproc region in which to handle the request. /// * `clusterName` - Required. The cluster name. pub fn regions_clusters_delete(&self, project_id: &str, region: &str, cluster_name: &str) -> ProjectRegionClusterDeleteCall<'a, C, A> { ProjectRegionClusterDeleteCall { hub: self.hub, _project_id: project_id.to_string(), _region: region.to_string(), _cluster_name: cluster_name.to_string(), _request_id: Default::default(), _cluster_uuid: Default::default(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Instantiates a template and begins execution.This method is equivalent to executing the sequence CreateWorkflowTemplate, InstantiateWorkflowTemplate, DeleteWorkflowTemplate.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata.On successful completion, Operation.response will be Empty. /// /// # Arguments /// /// * `request` - No description provided. /// * `parent` - Required. The "resource name" of the workflow template region, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region} pub fn regions_workflow_templates_instantiate_inline(&self, request: WorkflowTemplate, parent: &str) -> ProjectRegionWorkflowTemplateInstantiateInlineCall<'a, C, A> { ProjectRegionWorkflowTemplateInstantiateInlineCall { hub: self.hub, _request: request, _parent: parent.to_string(), _request_id: Default::default(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Deletes a workflow template. It does not cancel in-progress workflows. /// /// # Arguments /// /// * `name` - Required. 
The "resource name" of the workflow template, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region}/workflowTemplates/{template_id} pub fn regions_workflow_templates_delete(&self, name: &str) -> ProjectRegionWorkflowTemplateDeleteCall<'a, C, A> { ProjectRegionWorkflowTemplateDeleteCall { hub: self.hub, _name: name.to_string(), _version: Default::default(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set. /// /// # Arguments /// /// * `request` - No description provided. /// * `resource` - REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. pub fn regions_operations_get_iam_policy(&self, request: GetIamPolicyRequest, resource: &str) -> ProjectRegionOperationGetIamPolicyCall<'a, C, A> { ProjectRegionOperationGetIamPolicyCall { hub: self.hub, _request: request, _resource: resource.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Retrieves the latest workflow template.Can retrieve previously instantiated template by specifying optional version parameter. /// /// # Arguments /// /// * `name` - Required. The "resource name" of the workflow template, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region}/workflowTemplates/{template_id} pub fn regions_workflow_templates_get(&self, name: &str) -> ProjectRegionWorkflowTemplateGetCall<'a, C, A> { ProjectRegionWorkflowTemplateGetCall { hub: self.hub, _name: name.to_string(), _version: Default::default(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Creates new workflow template. /// /// # Arguments /// /// * `request` - No description provided. /// * `parent` - Required. The "resource name" of the region, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region} pub fn regions_workflow_templates_create(&self, request: WorkflowTemplate, parent: &str) -> ProjectRegionWorkflowTemplateCreateCall<'a, C, A> { ProjectRegionWorkflowTemplateCreateCall { hub: self.hub, _request: request, _parent: parent.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Deletes the job from the project. If the job is active, the delete fails, and the response returns FAILED_PRECONDITION. /// /// # Arguments /// /// * `projectId` - Required. The ID of the Google Cloud Platform project that the job belongs to. /// * `region` - Required. The Cloud Dataproc region in which to handle the request. /// * `jobId` - Required. The job ID. 
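    ///
    /// A sketch of one way to handle an active job (hand-written, not generated; IDs are placeholders).
    /// Since deleting an active job returns FAILED_PRECONDITION, the job can first be cancelled with
    /// `regions_jobs_cancel`:
    ///
    /// ```text
    /// let _cancel = hub.projects()
    ///     .regions_jobs_cancel(CancelJobRequest::default(), "my-project", "us-central1", "my-job-id")
    ///     .doit();
    /// let _delete = hub.projects()
    ///     .regions_jobs_delete("my-project", "us-central1", "my-job-id")
    ///     .doit();
    /// ```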
pub fn regions_jobs_delete(&self, project_id: &str, region: &str, job_id: &str) -> ProjectRegionJobDeleteCall<'a, C, A> { ProjectRegionJobDeleteCall { hub: self.hub, _project_id: project_id.to_string(), _region: region.to_string(), _job_id: job_id.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Sets the access control policy on the specified resource. Replaces any existing policy. /// /// # Arguments /// /// * `request` - No description provided. /// * `resource` - REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. pub fn regions_jobs_set_iam_policy(&self, request: SetIamPolicyRequest, resource: &str) -> ProjectRegionJobSetIamPolicyCall<'a, C, A> { ProjectRegionJobSetIamPolicyCall { hub: self.hub, _request: request, _resource: resource.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Starts a job cancellation request. To access the job resource after cancellation, call regions/{region}/jobs.list or regions/{region}/jobs.get. /// /// # Arguments /// /// * `request` - No description provided. /// * `projectId` - Required. The ID of the Google Cloud Platform project that the job belongs to. /// * `region` - Required. The Cloud Dataproc region in which to handle the request. /// * `jobId` - Required. The job ID. pub fn regions_jobs_cancel(&self, request: CancelJobRequest, project_id: &str, region: &str, job_id: &str) -> ProjectRegionJobCancelCall<'a, C, A> { ProjectRegionJobCancelCall { hub: self.hub, _request: request, _project_id: project_id.to_string(), _region: region.to_string(), _job_id: job_id.to_string(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } } // ################### // CallBuilders ### // ################# /// Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning. /// /// A builder for the *regions.workflowTemplates.testIamPermissions* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. 
/// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::TestIamPermissionsRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = TestIamPermissionsRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_workflow_templates_test_iam_permissions(req, "resource") /// .doit(); /// # } /// ``` pub struct ProjectRegionWorkflowTemplateTestIamPermissionCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: TestIamPermissionsRequest, _resource: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionWorkflowTemplateTestIamPermissionCall<'a, C, A> {} impl<'a, C, A> ProjectRegionWorkflowTemplateTestIamPermissionCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
pub fn doit(mut self) -> Result<(hyper::client::Response, TestIamPermissionsResponse)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.workflowTemplates.testIamPermissions", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("resource", self._resource.to_string())); for &field in ["alt", "resource"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+resource}:testIamPermissions"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+resource}", "resource")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["resource"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), 
json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: TestIamPermissionsRequest) -> ProjectRegionWorkflowTemplateTestIamPermissionCall<'a, C, A> { self._request = new_value; self } /// REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. /// /// Sets the *resource* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn resource(mut self, new_value: &str) -> ProjectRegionWorkflowTemplateTestIamPermissionCall<'a, C, A> { self._resource = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionWorkflowTemplateTestIamPermissionCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. 
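    ///
    /// For example, to request a partial response via the standard `fields` selector listed above
    /// (a hand-written sketch; the `permissions` field name is assumed from the response type):
    ///
    /// ```text
    /// let result = hub.projects()
    ///     .regions_workflow_templates_test_iam_permissions(req, "resource")
    ///     .param("fields", "permissions")
    ///     .doit();
    /// ```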
pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionWorkflowTemplateTestIamPermissionCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionWorkflowTemplateTestIamPermissionCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Gets the resource representation for a cluster in a project. /// /// A builder for the *regions.clusters.get* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_clusters_get("projectId", "region", "clusterName") /// .doit(); /// # } /// ``` pub struct ProjectRegionClusterGetCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _project_id: String, _region: String, _cluster_name: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionClusterGetCall<'a, C, A> {} impl<'a, C, A> ProjectRegionClusterGetCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
pub fn doit(mut self) -> Result<(hyper::client::Response, Cluster)> { use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.clusters.get", http_method: hyper::method::Method::Get }); let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len()); params.push(("projectId", self._project_id.to_string())); params.push(("region", self._region.to_string())); params.push(("clusterName", self._cluster_name.to_string())); for &field in ["alt", "projectId", "region", "clusterName"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{projectId}", "projectId"), ("{region}", "region"), ("{clusterName}", "clusterName")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(3); for param_name in ["clusterName", "region", "projectId"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// Required. 
The ID of the Google Cloud Platform project that the cluster belongs to.
    ///
    /// Sets the *project id* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn project_id(mut self, new_value: &str) -> ProjectRegionClusterGetCall<'a, C, A> {
        self._project_id = new_value.to_string();
        self
    }
    /// Required. The Cloud Dataproc region in which to handle the request.
    ///
    /// Sets the *region* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn region(mut self, new_value: &str) -> ProjectRegionClusterGetCall<'a, C, A> {
        self._region = new_value.to_string();
        self
    }
    /// Required. The cluster name.
    ///
    /// Sets the *cluster name* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn cluster_name(mut self, new_value: &str) -> ProjectRegionClusterGetCall<'a, C, A> {
        self._cluster_name = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionClusterGetCall<'a, C, A> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *callback* (query-string) - JSONP
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *alt* (query-string) - Data format for response.
    /// * *$.xgafv* (query-string) - V1 error format.
    pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionClusterGetCall<'a, C, A> where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes.
This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionClusterGetCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning. /// /// A builder for the *regions.operations.testIamPermissions* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::TestIamPermissionsRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = TestIamPermissionsRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_operations_test_iam_permissions(req, "resource") /// .doit(); /// # } /// ``` pub struct ProjectRegionOperationTestIamPermissionCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: TestIamPermissionsRequest, _resource: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionOperationTestIamPermissionCall<'a, C, A> {} impl<'a, C, A> ProjectRegionOperationTestIamPermissionCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
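    ///
    /// A hand-written sketch of consuming the result (`call` stands for a fully configured builder);
    /// the error variants shown are the ones this method constructs in its body below:
    ///
    /// ```text
    /// match call.doit() {
    ///     Ok((_response, perms)) => { /* inspect the returned TestIamPermissionsResponse */ }
    ///     Err(Error::MissingToken(_)) => { /* authentication failed */ }
    ///     Err(Error::BadRequest(_)) | Err(Error::Failure(_)) => { /* server rejected the call */ }
    ///     Err(e) => { /* HttpError, FieldClash, JsonDecodeError, ... */ }
    /// }
    /// ```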
pub fn doit(mut self) -> Result<(hyper::client::Response, TestIamPermissionsResponse)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.operations.testIamPermissions", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("resource", self._resource.to_string())); for &field in ["alt", "resource"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+resource}:testIamPermissions"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+resource}", "resource")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["resource"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), 
json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: TestIamPermissionsRequest) -> ProjectRegionOperationTestIamPermissionCall<'a, C, A> { self._request = new_value; self } /// REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. /// /// Sets the *resource* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn resource(mut self, new_value: &str) -> ProjectRegionOperationTestIamPermissionCall<'a, C, A> { self._resource = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionOperationTestIamPermissionCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. 
pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionOperationTestIamPermissionCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionOperationTestIamPermissionCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Lists workflows that match the specified filter in the request. /// /// A builder for the *locations.workflowTemplates.list* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().locations_workflow_templates_list("parent") /// .page_token("justo") /// .page_size(-1) /// .doit(); /// # } /// ``` pub struct ProjectLocationWorkflowTemplateListCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _parent: String, _page_token: Option<String>, _page_size: Option<i32>, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectLocationWorkflowTemplateListCall<'a, C, A> {} impl<'a, C, A> ProjectLocationWorkflowTemplateListCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
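    ///
    /// A hand-written pagination sketch; the `next_page_token` and `templates` field names on
    /// `ListWorkflowTemplatesResponse` are assumed here:
    ///
    /// ```text
    /// let mut page_token = String::new();
    /// loop {
    ///     let (_resp, list) = hub.projects()
    ///         .locations_workflow_templates_list("parent")
    ///         .page_token(&page_token)
    ///         .page_size(50)
    ///         .doit()
    ///         .expect("list call failed");
    ///     // ... consume list.templates ...
    ///     match list.next_page_token {
    ///         Some(token) => page_token = token,
    ///         None => break,
    ///     }
    /// }
    /// ```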
pub fn doit(mut self) -> Result<(hyper::client::Response, ListWorkflowTemplatesResponse)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.locations.workflowTemplates.list", http_method: hyper::method::Method::Get }); let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len()); params.push(("parent", self._parent.to_string())); if let Some(value) = self._page_token { params.push(("pageToken", value.to_string())); } if let Some(value) = self._page_size { params.push(("pageSize", value.to_string())); } for &field in ["alt", "parent", "pageToken", "pageSize"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+parent}/workflowTemplates"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+parent}", "parent")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["parent"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, 
err)); } } }; dlg.finished(true); return Ok(result_value) } } } }
    /// Required. The "resource name" of the region, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region}
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> ProjectLocationWorkflowTemplateListCall<'a, C, A> {
        self._parent = new_value.to_string();
        self
    }
    /// Optional. The page token, returned by a previous call, to request the next page of results.
    ///
    /// Sets the *page token* query property to the given value.
    pub fn page_token(mut self, new_value: &str) -> ProjectLocationWorkflowTemplateListCall<'a, C, A> {
        self._page_token = Some(new_value.to_string());
        self
    }
    /// Optional. The maximum number of results to return in each response.
    ///
    /// Sets the *page size* query property to the given value.
    pub fn page_size(mut self, new_value: i32) -> ProjectLocationWorkflowTemplateListCall<'a, C, A> {
        self._page_size = Some(new_value);
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectLocationWorkflowTemplateListCall<'a, C, A> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *callback* (query-string) - JSONP
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *alt* (query-string) - Data format for response.
    /// * *$.xgafv* (query-string) - V1 error format.
    pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationWorkflowTemplateListCall<'a, C, A> where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes.
This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationWorkflowTemplateListCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service. /// /// A builder for the *regions.operations.get* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_operations_get("name") /// .doit(); /// # } /// ``` pub struct ProjectRegionOperationGetCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _name: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionOperationGetCall<'a, C, A> {} impl<'a, C, A> ProjectRegionOperationGetCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
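    ///
    /// A hand-written polling sketch, following the note above that clients may poll at intervals
    /// recommended by the API service; the `done` field name on `Operation` is assumed:
    ///
    /// ```text
    /// loop {
    ///     let (_resp, op) = hub.projects().regions_operations_get("name").doit().expect("get failed");
    ///     if op.done == Some(true) { break; }
    ///     // wait for the recommended interval before polling again
    /// }
    /// ```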
pub fn doit(mut self) -> Result<(hyper::client::Response, Operation)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.operations.get", http_method: hyper::method::Method::Get }); let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len()); params.push(("name", self._name.to_string())); for &field in ["alt", "name"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+name}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+name}", "name")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["name"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// The name of the operation resource. /// /// Sets the *name* path property to the given value. 
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> ProjectRegionOperationGetCall<'a, C, A> {
        self._name = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionOperationGetCall<'a, C, A> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *callback* (query-string) - JSONP
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *alt* (query-string) - Data format for response.
    /// * *$.xgafv* (query-string) - V1 error format.
    pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionOperationGetCall<'a, C, A> where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionOperationGetCall<'a, C, A>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}

/// Lists workflows that match the specified filter in the request.
/// /// A builder for the *regions.workflowTemplates.list* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_workflow_templates_list("parent") /// .page_token("sea") /// .page_size(-90) /// .doit(); /// # } /// ``` pub struct ProjectRegionWorkflowTemplateListCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _parent: String, _page_token: Option<String>, _page_size: Option<i32>, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionWorkflowTemplateListCall<'a, C, A> {} impl<'a, C, A> ProjectRegionWorkflowTemplateListCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, ListWorkflowTemplatesResponse)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.workflowTemplates.list", http_method: hyper::method::Method::Get }); let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len()); params.push(("parent", self._parent.to_string())); if let Some(value) = self._page_token { params.push(("pageToken", value.to_string())); } if let Some(value) = self._page_size { params.push(("pageSize", value.to_string())); } for &field in ["alt", "parent", "pageToken", "pageSize"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+parent}/workflowTemplates"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+parent}", "parent")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["parent"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, 
err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// Required. The "resource name" of the region, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region} /// /// Sets the *parent* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn parent(mut self, new_value: &str) -> ProjectRegionWorkflowTemplateListCall<'a, C, A> { self._parent = new_value.to_string(); self } /// Optional. The page token, returned by a previous call, to request the next page of results. /// /// Sets the *page token* query property to the given value. pub fn page_token(mut self, new_value: &str) -> ProjectRegionWorkflowTemplateListCall<'a, C, A> { self._page_token = Some(new_value.to_string()); self } /// Optional. The maximum number of results to return in each response. /// /// Sets the *page size* query property to the given value. pub fn page_size(mut self, new_value: i32) -> ProjectRegionWorkflowTemplateListCall<'a, C, A> { self._page_size = Some(new_value); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionWorkflowTemplateListCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionWorkflowTemplateListCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. 
This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionWorkflowTemplateListCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Sets the access control policy on the specified resource. Replaces any existing policy. /// /// A builder for the *regions.operations.setIamPolicy* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::SetIamPolicyRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = SetIamPolicyRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_operations_set_iam_policy(req, "resource") /// .doit(); /// # } /// ``` pub struct ProjectRegionOperationSetIamPolicyCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: SetIamPolicyRequest, _resource: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionOperationSetIamPolicyCall<'a, C, A> {} impl<'a, C, A> ProjectRegionOperationSetIamPolicyCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
pub fn doit(mut self) -> Result<(hyper::client::Response, Policy)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.operations.setIamPolicy", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("resource", self._resource.to_string())); for &field in ["alt", "resource"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+resource}:setIamPolicy"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+resource}", "resource")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["resource"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); 
continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: SetIamPolicyRequest) -> ProjectRegionOperationSetIamPolicyCall<'a, C, A> { self._request = new_value; self } /// REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. /// /// Sets the *resource* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn resource(mut self, new_value: &str) -> ProjectRegionOperationSetIamPolicyCall<'a, C, A> { self._resource = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionOperationSetIamPolicyCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionOperationSetIamPolicyCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. 
/// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionOperationSetIamPolicyCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Retrieves the latest workflow template.Can retrieve previously instantiated template by specifying optional version parameter. /// /// A builder for the *locations.workflowTemplates.get* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().locations_workflow_templates_get("name") /// .version(-95) /// .doit(); /// # } /// ``` pub struct ProjectLocationWorkflowTemplateGetCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _name: String, _version: Option<i32>, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectLocationWorkflowTemplateGetCall<'a, C, A> {} impl<'a, C, A> ProjectLocationWorkflowTemplateGetCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
pub fn doit(mut self) -> Result<(hyper::client::Response, WorkflowTemplate)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.locations.workflowTemplates.get", http_method: hyper::method::Method::Get }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("name", self._name.to_string())); if let Some(value) = self._version { params.push(("version", value.to_string())); } for &field in ["alt", "name", "version"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+name}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+name}", "name")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["name"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// Required. 
The "resource name" of the workflow template, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region}/workflowTemplates/{template_id} /// /// Sets the *name* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn name(mut self, new_value: &str) -> ProjectLocationWorkflowTemplateGetCall<'a, C, A> { self._name = new_value.to_string(); self } /// Optional. The version of workflow template to retrieve. Only previously instatiated versions can be retrieved.If unspecified, retrieves the current version. /// /// Sets the *version* query property to the given value. pub fn version(mut self, new_value: i32) -> ProjectLocationWorkflowTemplateGetCall<'a, C, A> { self._version = Some(new_value); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectLocationWorkflowTemplateGetCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationWorkflowTemplateGetCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). 
/// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationWorkflowTemplateGetCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Updates a job in a project. /// /// A builder for the *regions.jobs.patch* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::Job; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = Job::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_jobs_patch(req, "projectId", "region", "jobId") /// .update_mask("justo") /// .doit(); /// # } /// ``` pub struct ProjectRegionJobPatchCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: Job, _project_id: String, _region: String, _job_id: String, _update_mask: Option<String>, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionJobPatchCall<'a, C, A> {} impl<'a, C, A> ProjectRegionJobPatchCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
pub fn doit(mut self) -> Result<(hyper::client::Response, Job)> { use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.jobs.patch", http_method: hyper::method::Method::Patch }); let mut params: Vec<(&str, String)> = Vec::with_capacity(7 + self._additional_params.len()); params.push(("projectId", self._project_id.to_string())); params.push(("region", self._region.to_string())); params.push(("jobId", self._job_id.to_string())); if let Some(value) = self._update_mask { params.push(("updateMask", value.to_string())); } for &field in ["alt", "projectId", "region", "jobId", "updateMask"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}/regions/{region}/jobs/{jobId}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{projectId}", "projectId"), ("{region}", "region"), ("{jobId}", "jobId")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(3); for param_name in ["jobId", "region", "projectId"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Patch, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); 
res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property has already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: Job) -> ProjectRegionJobPatchCall<'a, C, A> { self._request = new_value; self } /// Required. The ID of the Google Cloud Platform project that the job belongs to. /// /// Sets the *project id* path property to the given value. /// /// Even though the property has already been set when instantiating this call, /// we provide this method for API completeness. pub fn project_id(mut self, new_value: &str) -> ProjectRegionJobPatchCall<'a, C, A> { self._project_id = new_value.to_string(); self } /// Required. The Cloud Dataproc region in which to handle the request. /// /// Sets the *region* path property to the given value. /// /// Even though the property has already been set when instantiating this call, /// we provide this method for API completeness. pub fn region(mut self, new_value: &str) -> ProjectRegionJobPatchCall<'a, C, A> { self._region = new_value.to_string(); self } /// Required. The job ID. /// /// Sets the *job id* path property to the given value. /// /// Even though the property has already been set when instantiating this call, /// we provide this method for API completeness. pub fn job_id
(mut self, new_value: &str) -> ProjectRegionJobPatchCall<'a, C, A> { self._job_id = new_value.to_string(); self } /// Required. Specifies the path, relative to <code>Job</code>, of the field to update. For example, to update the labels of a Job the <code>update_mask</code> parameter would be specified as <code>labels</code>, and the PATCH request body would specify the new value. <strong>Note:</strong> Currently, <code>labels</code> is the only field that can be updated. /// /// Sets the *update mask* query property to the given value. pub fn update_mask(mut self, new_value: &str) -> ProjectRegionJobPatchCall<'a, C, A> { self._update_mask = Some(new_value.to_string()); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionJobPatchCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionJobPatchCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. 
For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionJobPatchCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning. /// /// A builder for the *regions.jobs.testIamPermissions* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::TestIamPermissionsRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = TestIamPermissionsRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_jobs_test_iam_permissions(req, "resource") /// .doit(); /// # } /// ``` pub struct ProjectRegionJobTestIamPermissionCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: TestIamPermissionsRequest, _resource: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionJobTestIamPermissionCall<'a, C, A> {} impl<'a, C, A> ProjectRegionJobTestIamPermissionCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
pub fn doit(mut self) -> Result<(hyper::client::Response, TestIamPermissionsResponse)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.jobs.testIamPermissions", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("resource", self._resource.to_string())); for &field in ["alt", "resource"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+resource}:testIamPermissions"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+resource}", "resource")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["resource"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), 
json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: TestIamPermissionsRequest) -> ProjectRegionJobTestIamPermissionCall<'a, C, A> { self._request = new_value; self } /// REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. /// /// Sets the *resource* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn resource(mut self, new_value: &str) -> ProjectRegionJobTestIamPermissionCall<'a, C, A> { self._resource = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionJobTestIamPermissionCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionJobTestIamPermissionCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. 
/// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionJobTestIamPermissionCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Instantiates a template and begins execution.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata.On successful completion, Operation.response will be Empty. /// /// A builder for the *regions.workflowTemplates.instantiate* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::InstantiateWorkflowTemplateRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = InstantiateWorkflowTemplateRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! 
/// let result = hub.projects().regions_workflow_templates_instantiate(req, "name") /// .doit(); /// # } /// ``` pub struct ProjectRegionWorkflowTemplateInstantiateCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: InstantiateWorkflowTemplateRequest, _name: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionWorkflowTemplateInstantiateCall<'a, C, A> {} impl<'a, C, A> ProjectRegionWorkflowTemplateInstantiateCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<(hyper::client::Response, Operation)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.workflowTemplates.instantiate", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("name", self._name.to_string())); for &field in ["alt", "name"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+name}:instantiate"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+name}", "name")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["name"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) 
.header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: InstantiateWorkflowTemplateRequest) -> ProjectRegionWorkflowTemplateInstantiateCall<'a, C, A> { self._request = new_value; self } /// Required. The "resource name" of the workflow template, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region}/workflowTemplates/{template_id} /// /// Sets the *name* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn name(mut self, new_value: &str) -> ProjectRegionWorkflowTemplateInstantiateCall<'a, C, A> { self._name = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionWorkflowTemplateInstantiateCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. 
/// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionWorkflowTemplateInstantiateCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionWorkflowTemplateInstantiateCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning. /// /// A builder for the *locations.workflowTemplates.testIamPermissions* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::TestIamPermissionsRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! 
/// let mut req = TestIamPermissionsRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().locations_workflow_templates_test_iam_permissions(req, "resource") /// .doit(); /// # } /// ``` pub struct ProjectLocationWorkflowTemplateTestIamPermissionCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: TestIamPermissionsRequest, _resource: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectLocationWorkflowTemplateTestIamPermissionCall<'a, C, A> {} impl<'a, C, A> ProjectLocationWorkflowTemplateTestIamPermissionCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<(hyper::client::Response, TestIamPermissionsResponse)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.locations.workflowTemplates.testIamPermissions", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("resource", self._resource.to_string())); for &field in ["alt", "resource"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+resource}:testIamPermissions"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+resource}", "resource")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["resource"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { 
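                            // Neither the authenticator nor the delegate supplied a token,
                            // so this call is aborted with `Error::MissingToken`.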
dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: TestIamPermissionsRequest) -> ProjectLocationWorkflowTemplateTestIamPermissionCall<'a, C, A> { self._request = new_value; self } /// REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. /// /// Sets the *resource* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn resource(mut self, new_value: &str) -> ProjectLocationWorkflowTemplateTestIamPermissionCall<'a, C, A> { self._resource = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectLocationWorkflowTemplateTestIamPermissionCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. 
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationWorkflowTemplateTestIamPermissionCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationWorkflowTemplateTestIamPermissionCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Sets the access control policy on the specified resource. Replaces any existing policy. /// /// A builder for the *regions.clusters.setIamPolicy* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::SetIamPolicyRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! 
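/// // (In a real call, the desired IAM policy would be filled into `req` before `doit()`.)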
/// let mut req = SetIamPolicyRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_clusters_set_iam_policy(req, "resource") /// .doit(); /// # } /// ``` pub struct ProjectRegionClusterSetIamPolicyCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: SetIamPolicyRequest, _resource: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionClusterSetIamPolicyCall<'a, C, A> {} impl<'a, C, A> ProjectRegionClusterSetIamPolicyCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<(hyper::client::Response, Policy)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.clusters.setIamPolicy", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("resource", self._resource.to_string())); for &field in ["alt", "resource"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+resource}:setIamPolicy"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+resource}", "resource")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["resource"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: 
token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: SetIamPolicyRequest) -> ProjectRegionClusterSetIamPolicyCall<'a, C, A> { self._request = new_value; self } /// REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. /// /// Sets the *resource* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn resource(mut self, new_value: &str) -> ProjectRegionClusterSetIamPolicyCall<'a, C, A> { self._resource = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionClusterSetIamPolicyCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionClusterSetIamPolicyCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionClusterSetIamPolicyCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Creates new workflow template. /// /// A builder for the *locations.workflowTemplates.create* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::WorkflowTemplate; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = WorkflowTemplate::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! 
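/// // "parent" is only a placeholder here; the expected resource name format is
/// // described in the `parent()` setter documentation further below.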
/// let result = hub.projects().locations_workflow_templates_create(req, "parent") /// .doit(); /// # } /// ``` pub struct ProjectLocationWorkflowTemplateCreateCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: WorkflowTemplate, _parent: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectLocationWorkflowTemplateCreateCall<'a, C, A> {} impl<'a, C, A> ProjectLocationWorkflowTemplateCreateCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<(hyper::client::Response, WorkflowTemplate)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.locations.workflowTemplates.create", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("parent", self._parent.to_string())); for &field in ["alt", "parent"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+parent}/workflowTemplates"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+parent}", "parent")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["parent"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) 
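                    // Build this attempt's POST request; the surrounding loop retries whenever
                    // the delegate answers an HTTP error or failure with `oauth2::Retry::After`.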
.header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: WorkflowTemplate) -> ProjectLocationWorkflowTemplateCreateCall<'a, C, A> { self._request = new_value; self } /// Required. The "resource name" of the region, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region} /// /// Sets the *parent* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn parent(mut self, new_value: &str) -> ProjectLocationWorkflowTemplateCreateCall<'a, C, A> { self._parent = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectLocationWorkflowTemplateCreateCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. 
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationWorkflowTemplateCreateCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationWorkflowTemplateCreateCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning. /// /// A builder for the *regions.clusters.testIamPermissions* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::TestIamPermissionsRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! 
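/// // (The set of permissions to check would normally be filled into `req` before `doit()`.)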
/// let mut req = TestIamPermissionsRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_clusters_test_iam_permissions(req, "resource") /// .doit(); /// # } /// ``` pub struct ProjectRegionClusterTestIamPermissionCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: TestIamPermissionsRequest, _resource: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionClusterTestIamPermissionCall<'a, C, A> {} impl<'a, C, A> ProjectRegionClusterTestIamPermissionCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<(hyper::client::Response, TestIamPermissionsResponse)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.clusters.testIamPermissions", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("resource", self._resource.to_string())); for &field in ["alt", "resource"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+resource}:testIamPermissions"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+resource}", "resource")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["resource"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } 
} } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: TestIamPermissionsRequest) -> ProjectRegionClusterTestIamPermissionCall<'a, C, A> { self._request = new_value; self } /// REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. /// /// Sets the *resource* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn resource(mut self, new_value: &str) -> ProjectRegionClusterTestIamPermissionCall<'a, C, A> { self._resource = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionClusterTestIamPermissionCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). 
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionClusterTestIamPermissionCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionClusterTestIamPermissionCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Updates a cluster in a project. /// /// A builder for the *regions.clusters.patch* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::Cluster; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = Cluster::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. 
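/// // Note: the setter values below ("sea", "Lorem", "eos") are generator placeholders;
/// // a real `update_mask` would be a field path such as "config.worker_config.num_instances".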
/// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_clusters_patch(req, "projectId", "region", "clusterName") /// .update_mask("sea") /// .request_id("Lorem") /// .graceful_decommission_timeout("eos") /// .doit(); /// # } /// ``` pub struct ProjectRegionClusterPatchCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: Cluster, _project_id: String, _region: String, _cluster_name: String, _update_mask: Option<String>, _request_id: Option<String>, _graceful_decommission_timeout: Option<String>, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionClusterPatchCall<'a, C, A> {} impl<'a, C, A> ProjectRegionClusterPatchCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<(hyper::client::Response, Operation)> { use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.clusters.patch", http_method: hyper::method::Method::Patch }); let mut params: Vec<(&str, String)> = Vec::with_capacity(9 + self._additional_params.len()); params.push(("projectId", self._project_id.to_string())); params.push(("region", self._region.to_string())); params.push(("clusterName", self._cluster_name.to_string())); if let Some(value) = self._update_mask { params.push(("updateMask", value.to_string())); } if let Some(value) = self._request_id { params.push(("requestId", value.to_string())); } if let Some(value) = self._graceful_decommission_timeout { params.push(("gracefulDecommissionTimeout", value.to_string())); } for &field in ["alt", "projectId", "region", "clusterName", "updateMask", "requestId", "gracefulDecommissionTimeout"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{projectId}", "projectId"), ("{region}", "region"), ("{clusterName}", "clusterName")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(3); for param_name in ["clusterName", "region", "projectId"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = 
io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Patch, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: Cluster) -> ProjectRegionClusterPatchCall<'a, C, A> { self._request = new_value; self } /// Required. The ID of the Google Cloud Platform project the cluster belongs to. /// /// Sets the *project id* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn project_id(mut self, new_value: &str) -> ProjectRegionClusterPatchCall<'a, C, A> { self._project_id = new_value.to_string(); self } /// Required. The Cloud Dataproc region in which to handle the request. /// /// Sets the *region* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn region(mut self, new_value: &str) -> ProjectRegionClusterPatchCall<'a, C, A> { self._region = new_value.to_string(); self } /// Required. The cluster name. /// /// Sets the *cluster name* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn cluster_name(mut self, new_value: &str) -> ProjectRegionClusterPatchCall<'a, C, A> { self._cluster_name = new_value.to_string(); self } /// Required. Specifies the path, relative to Cluster, of the field to update. 
For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as config.worker_config.num_instances, and the PATCH request body would specify the new value, as follows: /// { /// "config":{ /// "workerConfig":{ /// "numInstances":"5" /// } /// } /// } /// Similarly, to change the number of preemptible workers in a cluster to 5, the update_mask parameter would be config.secondary_worker_config.num_instances, and the PATCH request body would be set as follows: /// { /// "config":{ /// "secondaryWorkerConfig":{ /// "numInstances":"5" /// } /// } /// } /// <strong>Note:</strong> Currently, only the following fields can be updated:<table> <tbody> <tr> <td><strong>Mask</strong></td> <td><strong>Purpose</strong></td> </tr> <tr> <td><strong><em>labels</em></strong></td> <td>Update labels</td> </tr> <tr> <td><strong><em>config.worker_config.num_instances</em></strong></td> <td>Resize primary worker group</td> </tr> <tr> <td><strong><em>config.secondary_worker_config.num_instances</em></strong></td> <td>Resize secondary worker group</td> </tr> </tbody> </table> /// /// Sets the *update mask* query property to the given value. pub fn update_mask(mut self, new_value: &str) -> ProjectRegionClusterPatchCall<'a, C, A> { self._update_mask = Some(new_value.to_string()); self } /// Optional. A unique id used to identify the request. If the server receives two UpdateClusterRequest requests with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. /// /// Sets the *request id* query property to the given value. pub fn request_id(mut self, new_value: &str) -> ProjectRegionClusterPatchCall<'a, C, A> { self._request_id = Some(new_value.to_string()); self } /// Optional. Timeout for graceful YARN decomissioning. Graceful decommissioning allows removing nodes from the cluster without interrupting jobs in progress. Timeout specifies how long to wait for jobs in progress to finish before forcefully removing nodes (and potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the maximum allowed timeout is 1 day.Only supported on Dataproc image versions 1.2 and higher. /// /// Sets the *graceful decommission timeout* query property to the given value. pub fn graceful_decommission_timeout(mut self, new_value: &str) -> ProjectRegionClusterPatchCall<'a, C, A> { self._graceful_decommission_timeout = Some(new_value.to_string()); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionClusterPatchCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. 
If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionClusterPatchCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionClusterPatchCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. /// /// A builder for the *regions.operations.delete* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. 
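///
/// Deleting an operation only discards the stored result record; as noted above, it does not
/// cancel a still-running operation. The `name` argument is the full operation resource name,
/// typically of the form `projects/{project_id}/regions/{region}/operations/{operation_id}`
/// (format shown here for orientation; consult the API reference for the authoritative shape).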
/// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_operations_delete("name") /// .doit(); /// # } /// ``` pub struct ProjectRegionOperationDeleteCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _name: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionOperationDeleteCall<'a, C, A> {} impl<'a, C, A> ProjectRegionOperationDeleteCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<(hyper::client::Response, Empty)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.operations.delete", http_method: hyper::method::Method::Delete }); let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len()); params.push(("name", self._name.to_string())); for &field in ["alt", "name"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+name}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+name}", "name")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["name"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => 
token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Delete, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// The name of the operation resource to be deleted. /// /// Sets the *name* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn name(mut self, new_value: &str) -> ProjectRegionOperationDeleteCall<'a, C, A> { self._name = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionOperationDeleteCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. 
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionOperationDeleteCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionOperationDeleteCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Submits a job to a cluster. /// /// A builder for the *regions.jobs.submit* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::SubmitJobRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = SubmitJobRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! 
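/// // `req` is left at its defaults here; a real submission would at least populate the job
/// // definition on the SubmitJobRequest (for example a Spark, PySpark or Hadoop job) before
/// // calling `doit()`. Field names follow the usual SubmitJobRequest layout and may be
/// // wrapped in `Option` in this generated crate.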
/// let result = hub.projects().regions_jobs_submit(req, "projectId", "region") /// .doit(); /// # } /// ``` pub struct ProjectRegionJobSubmitCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: SubmitJobRequest, _project_id: String, _region: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionJobSubmitCall<'a, C, A> {} impl<'a, C, A> ProjectRegionJobSubmitCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<(hyper::client::Response, Job)> { use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.jobs.submit", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len()); params.push(("projectId", self._project_id.to_string())); params.push(("region", self._region.to_string())); for &field in ["alt", "projectId", "region"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}/regions/{region}/jobs:submit"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{projectId}", "projectId"), ("{region}", "region")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(2); for param_name in ["region", "projectId"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) 
.header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: SubmitJobRequest) -> ProjectRegionJobSubmitCall<'a, C, A> { self._request = new_value; self } /// Required. The ID of the Google Cloud Platform project that the job belongs to. /// /// Sets the *project id* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn project_id(mut self, new_value: &str) -> ProjectRegionJobSubmitCall<'a, C, A> { self._project_id = new_value.to_string(); self } /// Required. The Cloud Dataproc region in which to handle the request. /// /// Sets the *region* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn region(mut self, new_value: &str) -> ProjectRegionJobSubmitCall<'a, C, A> { self._region = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionJobSubmitCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionJobSubmitCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionJobSubmitCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Creates a cluster in a project. /// /// A builder for the *regions.clusters.create* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::Cluster; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = Cluster::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! 
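/// // The `request_id` set below is only a placeholder; per the field documentation it should
/// // ideally be a UUID so the backend can deduplicate retried CreateClusterRequests.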
/// let result = hub.projects().regions_clusters_create(req, "projectId", "region") /// .request_id("amet") /// .doit(); /// # } /// ``` pub struct ProjectRegionClusterCreateCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: Cluster, _project_id: String, _region: String, _request_id: Option<String>, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionClusterCreateCall<'a, C, A> {} impl<'a, C, A> ProjectRegionClusterCreateCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<(hyper::client::Response, Operation)> { use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.clusters.create", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len()); params.push(("projectId", self._project_id.to_string())); params.push(("region", self._region.to_string())); if let Some(value) = self._request_id { params.push(("requestId", value.to_string())); } for &field in ["alt", "projectId", "region", "requestId"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}/regions/{region}/clusters"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{projectId}", "projectId"), ("{region}", "region")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(2); for param_name in ["region", "projectId"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut 
*self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: Cluster) -> ProjectRegionClusterCreateCall<'a, C, A> { self._request = new_value; self } /// Required. The ID of the Google Cloud Platform project that the cluster belongs to. /// /// Sets the *project id* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn project_id(mut self, new_value: &str) -> ProjectRegionClusterCreateCall<'a, C, A> { self._project_id = new_value.to_string(); self } /// Required. The Cloud Dataproc region in which to handle the request. /// /// Sets the *region* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn region(mut self, new_value: &str) -> ProjectRegionClusterCreateCall<'a, C, A> { self._region = new_value.to_string(); self } /// Optional. A unique id used to identify the request. If the server receives two CreateClusterRequest requests with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. /// /// Sets the *request id* query property to the given value. pub fn request_id(mut self, new_value: &str) -> ProjectRegionClusterCreateCall<'a, C, A> { self._request_id = Some(new_value.to_string()); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. 
pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionClusterCreateCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionClusterCreateCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionClusterCreateCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Sets the access control policy on the specified resource. Replaces any existing policy. /// /// A builder for the *locations.workflowTemplates.setIamPolicy* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. 
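///
/// Because `setIamPolicy` replaces the existing policy wholesale, the usual pattern is
/// read-modify-write: fetch the current policy, adjust its bindings, and send it back inside a
/// `SetIamPolicyRequest`. The sketch below is hypothetical; the get-policy builder and field
/// names are assumed from the API surface and have not been verified against this crate.
///
/// ```ignore
/// // Fetch the current policy, tweak it, then write it back (names assumed, not verified).
/// let (_, current) = hub.projects()
///     .locations_workflow_templates_get_iam_policy(GetIamPolicyRequest::default(), "resource")
///     .doit()?;
/// let mut set_req = SetIamPolicyRequest::default();
/// set_req.policy = Some(current); // adjust `bindings` on the fetched policy before assigning
/// let result = hub.projects()
///     .locations_workflow_templates_set_iam_policy(set_req, "resource")
///     .doit();
/// ```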
/// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::SetIamPolicyRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = SetIamPolicyRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().locations_workflow_templates_set_iam_policy(req, "resource") /// .doit(); /// # } /// ``` pub struct ProjectLocationWorkflowTemplateSetIamPolicyCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: SetIamPolicyRequest, _resource: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectLocationWorkflowTemplateSetIamPolicyCall<'a, C, A> {} impl<'a, C, A> ProjectLocationWorkflowTemplateSetIamPolicyCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
pub fn doit(mut self) -> Result<(hyper::client::Response, Policy)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.locations.workflowTemplates.setIamPolicy", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("resource", self._resource.to_string())); for &field in ["alt", "resource"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+resource}:setIamPolicy"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+resource}", "resource")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["resource"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { 
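                        // The delegate asked for a retry after this failed response: back off for
                        // the suggested duration, then re-enter the request loop.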
sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: SetIamPolicyRequest) -> ProjectLocationWorkflowTemplateSetIamPolicyCall<'a, C, A> { self._request = new_value; self } /// REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. /// /// Sets the *resource* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn resource(mut self, new_value: &str) -> ProjectLocationWorkflowTemplateSetIamPolicyCall<'a, C, A> { self._resource = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectLocationWorkflowTemplateSetIamPolicyCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationWorkflowTemplateSetIamPolicyCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. 
/// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationWorkflowTemplateSetIamPolicyCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Instantiates a template and begins execution.This method is equivalent to executing the sequence CreateWorkflowTemplate, InstantiateWorkflowTemplate, DeleteWorkflowTemplate.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata.On successful completion, Operation.response will be Empty. /// /// A builder for the *locations.workflowTemplates.instantiateInline* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::WorkflowTemplate; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = WorkflowTemplate::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! 
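/// // For an inline instantiation, `req` carries the whole template body (placement, jobs and any
/// // template parameters); nothing is stored server-side, matching the create/instantiate/delete
/// // sequence described above.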
/// let result = hub.projects().locations_workflow_templates_instantiate_inline(req, "parent") /// .request_id("eirmod") /// .doit(); /// # } /// ``` pub struct ProjectLocationWorkflowTemplateInstantiateInlineCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: WorkflowTemplate, _parent: String, _request_id: Option<String>, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectLocationWorkflowTemplateInstantiateInlineCall<'a, C, A> {} impl<'a, C, A> ProjectLocationWorkflowTemplateInstantiateInlineCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<(hyper::client::Response, Operation)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.locations.workflowTemplates.instantiateInline", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len()); params.push(("parent", self._parent.to_string())); if let Some(value) = self._request_id { params.push(("requestId", value.to_string())); } for &field in ["alt", "parent", "requestId"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+parent}/workflowTemplates:instantiateInline"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+parent}", "parent")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["parent"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); 
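                // Rewind the serialized JSON body so it is sent from the start on this attempt
                // (the surrounding loop may retry the request).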
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: WorkflowTemplate) -> ProjectLocationWorkflowTemplateInstantiateInlineCall<'a, C, A> { self._request = new_value; self } /// Required. The "resource name" of the workflow template region, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region} /// /// Sets the *parent* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn parent(mut self, new_value: &str) -> ProjectLocationWorkflowTemplateInstantiateInlineCall<'a, C, A> { self._parent = new_value.to_string(); self } /// Optional. A tag that prevents multiple concurrent workflow instances with the same tag from running. This mitigates risk of concurrent instances started due to retries.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. /// /// Sets the *request id* query property to the given value. pub fn request_id(mut self, new_value: &str) -> ProjectLocationWorkflowTemplateInstantiateInlineCall<'a, C, A> { self._request_id = Some(new_value.to_string()); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectLocationWorkflowTemplateInstantiateInlineCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. 
/// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationWorkflowTemplateInstantiateInlineCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationWorkflowTemplateInstantiateInlineCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Updates (replaces) workflow template. The updated template must contain version that matches the current server version. /// /// A builder for the *locations.workflowTemplates.update* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. 
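///
/// In practice this is a fetch-modify-update cycle: retrieve the template (which carries the
/// current server `version`), change what you need, and pass the whole template back. The flow
/// below is a hypothetical sketch; the get builder and the `labels` field are assumed, not
/// verified against this crate.
///
/// ```ignore
/// let (_, mut template) = hub.projects()
///     .locations_workflow_templates_get("name")
///     .doit()?;
/// template.labels = Some(new_labels); // leave `template.version` exactly as returned
/// let result = hub.projects()
///     .locations_workflow_templates_update(template, "name")
///     .doit();
/// ```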
/// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::WorkflowTemplate; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = WorkflowTemplate::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().locations_workflow_templates_update(req, "name") /// .doit(); /// # } /// ``` pub struct ProjectLocationWorkflowTemplateUpdateCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: WorkflowTemplate, _name: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectLocationWorkflowTemplateUpdateCall<'a, C, A> {} impl<'a, C, A> ProjectLocationWorkflowTemplateUpdateCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
pub fn doit(mut self) -> Result<(hyper::client::Response, WorkflowTemplate)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.locations.workflowTemplates.update", http_method: hyper::method::Method::Put }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("name", self._name.to_string())); for &field in ["alt", "name"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+name}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+name}", "name")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["name"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Put, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } 
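                        // No retry was requested by the delegate: report the failure and, where
                        // possible, surface the server's structured error body.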
dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: WorkflowTemplate) -> ProjectLocationWorkflowTemplateUpdateCall<'a, C, A> { self._request = new_value; self } /// Output only. The "resource name" of the template, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region}/workflowTemplates/{template_id} /// /// Sets the *name* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn name(mut self, new_value: &str) -> ProjectLocationWorkflowTemplateUpdateCall<'a, C, A> { self._name = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectLocationWorkflowTemplateUpdateCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationWorkflowTemplateUpdateCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. 
/// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationWorkflowTemplateUpdateCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Instantiates a template and begins execution.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata.On successful completion, Operation.response will be Empty. /// /// A builder for the *locations.workflowTemplates.instantiate* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::InstantiateWorkflowTemplateRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = InstantiateWorkflowTemplateRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! 
/// let result = hub.projects().locations_workflow_templates_instantiate(req, "name") /// .doit(); /// # } /// ``` pub struct ProjectLocationWorkflowTemplateInstantiateCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: InstantiateWorkflowTemplateRequest, _name: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectLocationWorkflowTemplateInstantiateCall<'a, C, A> {} impl<'a, C, A> ProjectLocationWorkflowTemplateInstantiateCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<(hyper::client::Response, Operation)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.locations.workflowTemplates.instantiate", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("name", self._name.to_string())); for &field in ["alt", "name"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+name}:instantiate"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+name}", "name")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["name"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) 
.header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: InstantiateWorkflowTemplateRequest) -> ProjectLocationWorkflowTemplateInstantiateCall<'a, C, A> { self._request = new_value; self } /// Required. The "resource name" of the workflow template, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region}/workflowTemplates/{template_id} /// /// Sets the *name* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn name(mut self, new_value: &str) -> ProjectLocationWorkflowTemplateInstantiateCall<'a, C, A> { self._name = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectLocationWorkflowTemplateInstantiateCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. 
/// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationWorkflowTemplateInstantiateCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationWorkflowTemplateInstantiateCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set. /// /// A builder for the *regions.clusters.getIamPolicy* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::GetIamPolicyRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = GetIamPolicyRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! 
/// let result = hub.projects().regions_clusters_get_iam_policy(req, "resource") /// .doit(); /// # } /// ``` pub struct ProjectRegionClusterGetIamPolicyCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: GetIamPolicyRequest, _resource: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionClusterGetIamPolicyCall<'a, C, A> {} impl<'a, C, A> ProjectRegionClusterGetIamPolicyCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<(hyper::client::Response, Policy)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.clusters.getIamPolicy", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("resource", self._resource.to_string())); for &field in ["alt", "resource"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+resource}:getIamPolicy"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+resource}", "resource")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["resource"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) 
.header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: GetIamPolicyRequest) -> ProjectRegionClusterGetIamPolicyCall<'a, C, A> { self._request = new_value; self } /// REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. /// /// Sets the *resource* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn resource(mut self, new_value: &str) -> ProjectRegionClusterGetIamPolicyCall<'a, C, A> { self._resource = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionClusterGetIamPolicyCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. 
Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionClusterGetIamPolicyCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionClusterGetIamPolicyCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Lists all regions/{region}/clusters in a project. /// /// A builder for the *regions.clusters.list* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! 
/// let result = hub.projects().regions_clusters_list("projectId", "region") /// .page_token("sea") /// .page_size(-21) /// .filter("duo") /// .doit(); /// # } /// ``` pub struct ProjectRegionClusterListCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _project_id: String, _region: String, _page_token: Option<String>, _page_size: Option<i32>, _filter: Option<String>, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionClusterListCall<'a, C, A> {} impl<'a, C, A> ProjectRegionClusterListCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<(hyper::client::Response, ListClustersResponse)> { use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.clusters.list", http_method: hyper::method::Method::Get }); let mut params: Vec<(&str, String)> = Vec::with_capacity(7 + self._additional_params.len()); params.push(("projectId", self._project_id.to_string())); params.push(("region", self._region.to_string())); if let Some(value) = self._page_token { params.push(("pageToken", value.to_string())); } if let Some(value) = self._page_size { params.push(("pageSize", value.to_string())); } if let Some(value) = self._filter { params.push(("filter", value.to_string())); } for &field in ["alt", "projectId", "region", "pageToken", "pageSize", "filter"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}/regions/{region}/clusters"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{projectId}", "projectId"), ("{region}", "region")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(2); for param_name in ["region", "projectId"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); 
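                            // The delegate asked for a retry: loop again and re-issue the request.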
continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// Required. The ID of the Google Cloud Platform project that the cluster belongs to. /// /// Sets the *project id* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn project_id(mut self, new_value: &str) -> ProjectRegionClusterListCall<'a, C, A> { self._project_id = new_value.to_string(); self } /// Required. The Cloud Dataproc region in which to handle the request. /// /// Sets the *region* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn region(mut self, new_value: &str) -> ProjectRegionClusterListCall<'a, C, A> { self._region = new_value.to_string(); self } /// Optional. The standard List page token. /// /// Sets the *page token* query property to the given value. pub fn page_token(mut self, new_value: &str) -> ProjectRegionClusterListCall<'a, C, A> { self._page_token = Some(new_value.to_string()); self } /// Optional. The standard List page size. /// /// Sets the *page size* query property to the given value. pub fn page_size(mut self, new_value: i32) -> ProjectRegionClusterListCall<'a, C, A> { self._page_size = Some(new_value); self } /// Optional. A filter constraining the clusters to list. Filters are case-sensitive and have the following syntax:field = value AND field = value ...where field is one of status.state, clusterName, or labels.[KEY], and [KEY] is a label key. value can be * to match all values. status.state can be one of the following: ACTIVE, INACTIVE, CREATING, RUNNING, ERROR, DELETING, or UPDATING. ACTIVE contains the CREATING, UPDATING, and RUNNING states. INACTIVE contains the DELETING and ERROR states. clusterName is the name of the cluster provided at creation time. Only the logical AND operator is supported; space-separated items are treated as having an implicit AND operator.Example filter:status.state = ACTIVE AND clusterName = mycluster AND labels.env = staging AND labels.starred = * /// /// Sets the *filter* query property to the given value. pub fn filter(mut self, new_value: &str) -> ProjectRegionClusterListCall<'a, C, A> { self._filter = Some(new_value.to_string()); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. 
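    ///
    /// For example, a delegate whose `http_error` or `http_failure` hook returns
    /// `oauth2::Retry::After(duration)` makes this builder sleep for that duration and re-issue
    /// the request, so transient failures can be retried without changing the calling code.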
pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionClusterListCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionClusterListCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionClusterListCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Lists regions/{region}/jobs in a project. /// /// A builder for the *regions.jobs.list* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. 
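///
/// Listing is paginated: `page_size` bounds the number of jobs returned per call and
/// `page_token` requests the next page using the token returned by the previous call (the
/// token's field name on `ListJobsResponse` is generated elsewhere in this crate and is not
/// repeated here).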
/// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_jobs_list("projectId", "region") /// .page_token("sanctus") /// .page_size(-22) /// .job_state_matcher("amet") /// .filter("et") /// .cluster_name("consetetur") /// .doit(); /// # } /// ``` pub struct ProjectRegionJobListCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _project_id: String, _region: String, _page_token: Option<String>, _page_size: Option<i32>, _job_state_matcher: Option<String>, _filter: Option<String>, _cluster_name: Option<String>, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionJobListCall<'a, C, A> {} impl<'a, C, A> ProjectRegionJobListCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
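    ///
    /// A sketch of a narrowed listing that uses only the setters documented below; the project,
    /// region, cluster, and label values are illustrative, and the filter string follows the
    /// syntax described on the `filter` setter:
    ///
    /// ```text
    /// let (_, jobs) = hub.projects()
    ///     .regions_jobs_list("my-project", "us-central1")
    ///     .cluster_name("my-cluster")
    ///     .filter("status.state = ACTIVE AND labels.env = staging")
    ///     .page_size(50)
    ///     .doit()?;
    /// ```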
pub fn doit(mut self) -> Result<(hyper::client::Response, ListJobsResponse)> { use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.jobs.list", http_method: hyper::method::Method::Get }); let mut params: Vec<(&str, String)> = Vec::with_capacity(9 + self._additional_params.len()); params.push(("projectId", self._project_id.to_string())); params.push(("region", self._region.to_string())); if let Some(value) = self._page_token { params.push(("pageToken", value.to_string())); } if let Some(value) = self._page_size { params.push(("pageSize", value.to_string())); } if let Some(value) = self._job_state_matcher { params.push(("jobStateMatcher", value.to_string())); } if let Some(value) = self._filter { params.push(("filter", value.to_string())); } if let Some(value) = self._cluster_name { params.push(("clusterName", value.to_string())); } for &field in ["alt", "projectId", "region", "pageToken", "pageSize", "jobStateMatcher", "filter", "clusterName"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}/regions/{region}/jobs"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{projectId}", "projectId"), ("{region}", "region")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(2); for param_name in ["region", "projectId"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = 
{ let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// Required. The ID of the Google Cloud Platform project that the job belongs to. /// /// Sets the *project id* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn project_id(mut self, new_value: &str) -> ProjectRegionJobListCall<'a, C, A> { self._project_id = new_value.to_string(); self } /// Required. The Cloud Dataproc region in which to handle the request. /// /// Sets the *region* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn region(mut self, new_value: &str) -> ProjectRegionJobListCall<'a, C, A> { self._region = new_value.to_string(); self } /// Optional. The page token, returned by a previous call, to request the next page of results. /// /// Sets the *page token* query property to the given value. pub fn page_token(mut self, new_value: &str) -> ProjectRegionJobListCall<'a, C, A> { self._page_token = Some(new_value.to_string()); self } /// Optional. The number of results to return in each response. /// /// Sets the *page size* query property to the given value. pub fn page_size(mut self, new_value: i32) -> ProjectRegionJobListCall<'a, C, A> { self._page_size = Some(new_value); self } /// Optional. Specifies enumerated categories of jobs to list. (default = match ALL jobs).If filter is provided, jobStateMatcher will be ignored. /// /// Sets the *job state matcher* query property to the given value. pub fn job_state_matcher(mut self, new_value: &str) -> ProjectRegionJobListCall<'a, C, A> { self._job_state_matcher = Some(new_value.to_string()); self } /// Optional. A filter constraining the jobs to list. Filters are case-sensitive and have the following syntax:field = value AND field = value ...where field is status.state or labels.[KEY], and [KEY] is a label key. value can be * to match all values. status.state can be either ACTIVE or NON_ACTIVE. Only the logical AND operator is supported; space-separated items are treated as having an implicit AND operator.Example filter:status.state = ACTIVE AND labels.env = staging AND labels.starred = * /// /// Sets the *filter* query property to the given value. pub fn filter(mut self, new_value: &str) -> ProjectRegionJobListCall<'a, C, A> { self._filter = Some(new_value.to_string()); self } /// Optional. If set, the returned jobs list includes only jobs that were submitted to the named cluster. /// /// Sets the *cluster name* query property to the given value. pub fn cluster_name(mut self, new_value: &str) -> ProjectRegionJobListCall<'a, C, A> { self._cluster_name = Some(new_value.to_string()); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. 
pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionJobListCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionJobListCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionJobListCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set. /// /// A builder for the *regions.jobs.getIamPolicy* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. 
/// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::GetIamPolicyRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = GetIamPolicyRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_jobs_get_iam_policy(req, "resource") /// .doit(); /// # } /// ``` pub struct ProjectRegionJobGetIamPolicyCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: GetIamPolicyRequest, _resource: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionJobGetIamPolicyCall<'a, C, A> {} impl<'a, C, A> ProjectRegionJobGetIamPolicyCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
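    ///
    /// In practice the `GetIamPolicyRequest` body is usually left at its default, as in the
    /// example above; the `resource` path argument alone selects the job whose policy is read.
    /// A minimal sketch (the resource string is illustrative only):
    ///
    /// ```text
    /// let (_, policy) = hub.projects()
    ///     .regions_jobs_get_iam_policy(GetIamPolicyRequest::default(), "projects/p/regions/r/jobs/my-job")
    ///     .doit()?;
    /// ```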
pub fn doit(mut self) -> Result<(hyper::client::Response, Policy)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.jobs.getIamPolicy", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("resource", self._resource.to_string())); for &field in ["alt", "resource"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+resource}:getIamPolicy"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+resource}", "resource")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["resource"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); 
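                            // Retry requested by the delegate for this HTTP failure: loop and resend the request.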
continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: GetIamPolicyRequest) -> ProjectRegionJobGetIamPolicyCall<'a, C, A> { self._request = new_value; self } /// REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. /// /// Sets the *resource* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn resource(mut self, new_value: &str) -> ProjectRegionJobGetIamPolicyCall<'a, C, A> { self._resource = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionJobGetIamPolicyCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionJobGetIamPolicyCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. 
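    ///
    /// A minimal, illustrative sketch (the scope URL shown is the `cloud-platform` scope this
    /// method already falls back to by default):
    ///
    /// ```text
    /// let call = call.add_scope("https://www.googleapis.com/auth/cloud-platform");
    /// ```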
/// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionJobGetIamPolicyCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED. /// /// A builder for the *regions.operations.cancel* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_operations_cancel("name") /// .doit(); /// # } /// ``` pub struct ProjectRegionOperationCancelCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _name: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionOperationCancelCall<'a, C, A> {} impl<'a, C, A> ProjectRegionOperationCancelCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
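    ///
    /// On success the server returns an `Empty` body, so a caller will usually only inspect the
    /// error side of the result. A minimal, illustrative sketch (names are hypothetical):
    ///
    /// ```text
    /// if let Err(e) = hub.projects().regions_operations_cancel("name").doit() {
    ///     eprintln!("cancel request failed: {}", e);
    /// }
    /// ```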
pub fn doit(mut self) -> Result<(hyper::client::Response, Empty)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.operations.cancel", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len()); params.push(("name", self._name.to_string())); for &field in ["alt", "name"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+name}:cancel"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+name}", "name")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["name"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// The name of the operation resource to be cancelled. /// /// Sets the *name* path property to the given value. 
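    ///
    /// The value is a full operation resource name; an illustrative (not normative) form is
    /// `projects/{project_id}/regions/{region}/operations/{operation_id}`.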
/// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn name(mut self, new_value: &str) -> ProjectRegionOperationCancelCall<'a, C, A> { self._name = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionOperationCancelCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionOperationCancelCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionOperationCancelCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Gets cluster diagnostic information. After the operation completes, the Operation.response field contains DiagnoseClusterOutputLocation. 
/// /// A builder for the *regions.clusters.diagnose* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::DiagnoseClusterRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = DiagnoseClusterRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_clusters_diagnose(req, "projectId", "region", "clusterName") /// .doit(); /// # } /// ``` pub struct ProjectRegionClusterDiagnoseCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: DiagnoseClusterRequest, _project_id: String, _region: String, _cluster_name: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionClusterDiagnoseCall<'a, C, A> {} impl<'a, C, A> ProjectRegionClusterDiagnoseCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
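    ///
    /// The call returns a long-running `Operation` rather than the diagnostic output itself;
    /// once that operation completes, its `response` field carries the
    /// `DiagnoseClusterOutputLocation` (see the method description above). A minimal,
    /// illustrative sketch of kicking it off (names are hypothetical):
    ///
    /// ```text
    /// let (_resp, operation) = hub.projects()
    ///     .regions_clusters_diagnose(req, "projectId", "region", "clusterName")
    ///     .doit()?;
    /// println!("started diagnose operation: {:?}", operation.name);
    /// ```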
pub fn doit(mut self) -> Result<(hyper::client::Response, Operation)> { use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.clusters.diagnose", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len()); params.push(("projectId", self._project_id.to_string())); params.push(("region", self._region.to_string())); params.push(("clusterName", self._cluster_name.to_string())); for &field in ["alt", "projectId", "region", "clusterName"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:diagnose"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{projectId}", "projectId"), ("{region}", "region"), ("{clusterName}", "clusterName")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(3); for param_name in ["clusterName", "region", "projectId"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if 
let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: DiagnoseClusterRequest) -> ProjectRegionClusterDiagnoseCall<'a, C, A> { self._request = new_value; self } /// Required. The ID of the Google Cloud Platform project that the cluster belongs to. /// /// Sets the *project id* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn project_id(mut self, new_value: &str) -> ProjectRegionClusterDiagnoseCall<'a, C, A> { self._project_id = new_value.to_string(); self } /// Required. The Cloud Dataproc region in which to handle the request. /// /// Sets the *region* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn region(mut self, new_value: &str) -> ProjectRegionClusterDiagnoseCall<'a, C, A> { self._region = new_value.to_string(); self } /// Required. The cluster name. /// /// Sets the *cluster name* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn cluster_name(mut self, new_value: &str) -> ProjectRegionClusterDiagnoseCall<'a, C, A> { self._cluster_name = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionClusterDiagnoseCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionClusterDiagnoseCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionClusterDiagnoseCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Updates (replaces) workflow template. The updated template must contain version that matches the current server version. /// /// A builder for the *regions.workflowTemplates.update* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::WorkflowTemplate; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = WorkflowTemplate::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. 
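/// // Note (illustrative): per the method description above, the template sent here must
/// // carry the version currently stored on the server, or the update will be rejected.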
/// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_workflow_templates_update(req, "name") /// .doit(); /// # } /// ``` pub struct ProjectRegionWorkflowTemplateUpdateCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: WorkflowTemplate, _name: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionWorkflowTemplateUpdateCall<'a, C, A> {} impl<'a, C, A> ProjectRegionWorkflowTemplateUpdateCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<(hyper::client::Response, WorkflowTemplate)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.workflowTemplates.update", http_method: hyper::method::Method::Put }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("name", self._name.to_string())); for &field in ["alt", "name"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+name}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+name}", "name")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["name"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Put, 
&url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: WorkflowTemplate) -> ProjectRegionWorkflowTemplateUpdateCall<'a, C, A> { self._request = new_value; self } /// Output only. The "resource name" of the template, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region}/workflowTemplates/{template_id} /// /// Sets the *name* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn name(mut self, new_value: &str) -> ProjectRegionWorkflowTemplateUpdateCall<'a, C, A> { self._name = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionWorkflowTemplateUpdateCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. 
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionWorkflowTemplateUpdateCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionWorkflowTemplateUpdateCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id. /// /// A builder for the *regions.operations.list* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! 
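/// // Note (illustrative): per the description above, the default `name` binding includes the
/// // operations collection id, i.e. it names the operations collection's parent resource.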
/// let result = hub.projects().regions_operations_list("name") /// .page_token("consetetur") /// .page_size(-52) /// .filter("voluptua.") /// .doit(); /// # } /// ``` pub struct ProjectRegionOperationListCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _name: String, _page_token: Option<String>, _page_size: Option<i32>, _filter: Option<String>, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionOperationListCall<'a, C, A> {} impl<'a, C, A> ProjectRegionOperationListCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<(hyper::client::Response, ListOperationsResponse)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.operations.list", http_method: hyper::method::Method::Get }); let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len()); params.push(("name", self._name.to_string())); if let Some(value) = self._page_token { params.push(("pageToken", value.to_string())); } if let Some(value) = self._page_size { params.push(("pageSize", value.to_string())); } if let Some(value) = self._filter { params.push(("filter", value.to_string())); } for &field in ["alt", "name", "pageToken", "pageSize", "filter"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+name}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+name}", "name")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["name"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return 
Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// The name of the operation's parent resource. /// /// Sets the *name* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn name(mut self, new_value: &str) -> ProjectRegionOperationListCall<'a, C, A> { self._name = new_value.to_string(); self } /// The standard list page token. /// /// Sets the *page token* query property to the given value. pub fn page_token(mut self, new_value: &str) -> ProjectRegionOperationListCall<'a, C, A> { self._page_token = Some(new_value.to_string()); self } /// The standard list page size. /// /// Sets the *page size* query property to the given value. pub fn page_size(mut self, new_value: i32) -> ProjectRegionOperationListCall<'a, C, A> { self._page_size = Some(new_value); self } /// The standard list filter. /// /// Sets the *filter* query property to the given value. pub fn filter(mut self, new_value: &str) -> ProjectRegionOperationListCall<'a, C, A> { self._filter = Some(new_value.to_string()); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionOperationListCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. 
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionOperationListCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionOperationListCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Deletes a workflow template. It does not cancel in-progress workflows. /// /// A builder for the *locations.workflowTemplates.delete* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().locations_workflow_templates_delete("name") /// .version(-11) /// .doit(); /// # } /// ``` pub struct ProjectLocationWorkflowTemplateDeleteCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _name: String, _version: Option<i32>, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectLocationWorkflowTemplateDeleteCall<'a, C, A> {} impl<'a, C, A> ProjectLocationWorkflowTemplateDeleteCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
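    ///
    /// A successful delete returns an `Empty` body; note that, as stated in the method
    /// description, in-progress workflows started from this template are not cancelled.
    /// A minimal, illustrative sketch:
    ///
    /// ```text
    /// hub.projects().locations_workflow_templates_delete("name")
    ///     .version(3) // hypothetical: only delete if the server still holds version 3
    ///     .doit()?;
    /// ```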
pub fn doit(mut self) -> Result<(hyper::client::Response, Empty)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.locations.workflowTemplates.delete", http_method: hyper::method::Method::Delete }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("name", self._name.to_string())); if let Some(value) = self._version { params.push(("version", value.to_string())); } for &field in ["alt", "name", "version"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+name}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+name}", "name")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["name"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Delete, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// Required. 
The "resource name" of the workflow template, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region}/workflowTemplates/{template_id} /// /// Sets the *name* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn name(mut self, new_value: &str) -> ProjectLocationWorkflowTemplateDeleteCall<'a, C, A> { self._name = new_value.to_string(); self } /// Optional. The version of workflow template to delete. If specified, will only delete the template if the current server version matches specified version. /// /// Sets the *version* query property to the given value. pub fn version(mut self, new_value: i32) -> ProjectLocationWorkflowTemplateDeleteCall<'a, C, A> { self._version = Some(new_value); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectLocationWorkflowTemplateDeleteCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationWorkflowTemplateDeleteCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). 
/// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationWorkflowTemplateDeleteCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Sets the access control policy on the specified resource. Replaces any existing policy. /// /// A builder for the *regions.workflowTemplates.setIamPolicy* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::SetIamPolicyRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = SetIamPolicyRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_workflow_templates_set_iam_policy(req, "resource") /// .doit(); /// # } /// ``` pub struct ProjectRegionWorkflowTemplateSetIamPolicyCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: SetIamPolicyRequest, _resource: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionWorkflowTemplateSetIamPolicyCall<'a, C, A> {} impl<'a, C, A> ProjectRegionWorkflowTemplateSetIamPolicyCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
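    ///
    /// Note that, as stated in the method description, a successful call replaces any policy
    /// already attached to the resource, so the request should carry the complete policy you
    /// want to end up with; a common pattern is to read the current policy with the
    /// corresponding `getIamPolicy` call, modify it, and send the full result here.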
pub fn doit(mut self) -> Result<(hyper::client::Response, Policy)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.workflowTemplates.setIamPolicy", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("resource", self._resource.to_string())); for &field in ["alt", "resource"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+resource}:setIamPolicy"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+resource}", "resource")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["resource"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { 
sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: SetIamPolicyRequest) -> ProjectRegionWorkflowTemplateSetIamPolicyCall<'a, C, A> { self._request = new_value; self } /// REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. /// /// Sets the *resource* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn resource(mut self, new_value: &str) -> ProjectRegionWorkflowTemplateSetIamPolicyCall<'a, C, A> { self._resource = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionWorkflowTemplateSetIamPolicyCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionWorkflowTemplateSetIamPolicyCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. 
/// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionWorkflowTemplateSetIamPolicyCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set. /// /// A builder for the *regions.workflowTemplates.getIamPolicy* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::GetIamPolicyRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = GetIamPolicyRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_workflow_templates_get_iam_policy(req, "resource") /// .doit(); /// # } /// ``` pub struct ProjectRegionWorkflowTemplateGetIamPolicyCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: GetIamPolicyRequest, _resource: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionWorkflowTemplateGetIamPolicyCall<'a, C, A> {} impl<'a, C, A> ProjectRegionWorkflowTemplateGetIamPolicyCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
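    ///
    /// The `*fields*` selector documented under `param()` also applies here. A hedged sketch
    /// (not compiled as a doctest; the field mask value is only an illustration) of asking the
    /// server for a partial `Policy`:
    ///
    /// ```ignore
    /// // Only the `bindings` portion of the policy is requested back from the server.
    /// let result = hub.projects().regions_workflow_templates_get_iam_policy(req, "resource")
    ///              .param("fields", "bindings")
    ///              .doit();
    /// ```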
pub fn doit(mut self) -> Result<(hyper::client::Response, Policy)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.workflowTemplates.getIamPolicy", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("resource", self._resource.to_string())); for &field in ["alt", "resource"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+resource}:getIamPolicy"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+resource}", "resource")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["resource"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { 
sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: GetIamPolicyRequest) -> ProjectRegionWorkflowTemplateGetIamPolicyCall<'a, C, A> { self._request = new_value; self } /// REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. /// /// Sets the *resource* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn resource(mut self, new_value: &str) -> ProjectRegionWorkflowTemplateGetIamPolicyCall<'a, C, A> { self._resource = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionWorkflowTemplateGetIamPolicyCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionWorkflowTemplateGetIamPolicyCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. 
/// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionWorkflowTemplateGetIamPolicyCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Gets the resource representation for a job in a project. /// /// A builder for the *regions.jobs.get* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_jobs_get("projectId", "region", "jobId") /// .doit(); /// # } /// ``` pub struct ProjectRegionJobGetCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _project_id: String, _region: String, _job_id: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionJobGetCall<'a, C, A> {} impl<'a, C, A> ProjectRegionJobGetCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
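    ///
    /// A hedged sketch (not compiled as a doctest) of unpacking the `(response, job)` tuple;
    /// the `status` field on the raw `hyper` response is the same one this method checks
    /// internally via `is_success()`:
    ///
    /// ```ignore
    /// match hub.projects().regions_jobs_get("projectId", "region", "jobId").doit() {
    ///     Ok((response, job)) => {
    ///         // `response` is the raw hyper response, `job` the decoded resource.
    ///         println!("HTTP status: {}", response.status);
    ///         println!("job: {:?}", job);
    ///     }
    ///     Err(e) => println!("regions_jobs_get failed: {:?}", e),
    /// }
    /// ```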
pub fn doit(mut self) -> Result<(hyper::client::Response, Job)> { use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.jobs.get", http_method: hyper::method::Method::Get }); let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len()); params.push(("projectId", self._project_id.to_string())); params.push(("region", self._region.to_string())); params.push(("jobId", self._job_id.to_string())); for &field in ["alt", "projectId", "region", "jobId"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}/regions/{region}/jobs/{jobId}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{projectId}", "projectId"), ("{region}", "region"), ("{jobId}", "jobId")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(3); for param_name in ["jobId", "region", "projectId"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// Required. The ID of the Google Cloud Platform project that the job belongs to. 
/// /// Sets the *project id* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn project_id(mut self, new_value: &str) -> ProjectRegionJobGetCall<'a, C, A> { self._project_id = new_value.to_string(); self } /// Required. The Cloud Dataproc region in which to handle the request. /// /// Sets the *region* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn region(mut self, new_value: &str) -> ProjectRegionJobGetCall<'a, C, A> { self._region = new_value.to_string(); self } /// Required. The job ID. /// /// Sets the *job id* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn job_id(mut self, new_value: &str) -> ProjectRegionJobGetCall<'a, C, A> { self._job_id = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionJobGetCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionJobGetCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. 
/// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionJobGetCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set. /// /// A builder for the *locations.workflowTemplates.getIamPolicy* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::GetIamPolicyRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = GetIamPolicyRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().locations_workflow_templates_get_iam_policy(req, "resource") /// .doit(); /// # } /// ``` pub struct ProjectLocationWorkflowTemplateGetIamPolicyCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: GetIamPolicyRequest, _resource: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectLocationWorkflowTemplateGetIamPolicyCall<'a, C, A> {} impl<'a, C, A> ProjectLocationWorkflowTemplateGetIamPolicyCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
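    ///
    /// Note that query parameters with dedicated setters must not be smuggled in through
    /// `param()`; as the body below shows, `doit()` answers that with `Error::FieldClash`
    /// before any request is sent. A hedged sketch (not compiled as a doctest):
    ///
    /// ```ignore
    /// let result = hub.projects().locations_workflow_templates_get_iam_policy(req, "resource")
    ///              .param("alt", "json") // "alt" is managed by the builder itself
    ///              .doit();
    /// // `result` is Err(Error::FieldClash("alt")) rather than a server-side error.
    /// ```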
pub fn doit(mut self) -> Result<(hyper::client::Response, Policy)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.locations.workflowTemplates.getIamPolicy", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("resource", self._resource.to_string())); for &field in ["alt", "resource"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+resource}:getIamPolicy"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+resource}", "resource")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["resource"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { 
sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: GetIamPolicyRequest) -> ProjectLocationWorkflowTemplateGetIamPolicyCall<'a, C, A> { self._request = new_value; self } /// REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. /// /// Sets the *resource* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn resource(mut self, new_value: &str) -> ProjectLocationWorkflowTemplateGetIamPolicyCall<'a, C, A> { self._resource = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectLocationWorkflowTemplateGetIamPolicyCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationWorkflowTemplateGetIamPolicyCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. 
/// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationWorkflowTemplateGetIamPolicyCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Deletes a cluster in a project. /// /// A builder for the *regions.clusters.delete* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_clusters_delete("projectId", "region", "clusterName") /// .request_id("invidunt") /// .cluster_uuid("consetetur") /// .doit(); /// # } /// ``` pub struct ProjectRegionClusterDeleteCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _project_id: String, _region: String, _cluster_name: String, _request_id: Option<String>, _cluster_uuid: Option<String>, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionClusterDeleteCall<'a, C, A> {} impl<'a, C, A> ProjectRegionClusterDeleteCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
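    ///
    /// Deleting a cluster is asynchronous: the decoded value is a long-running `Operation`,
    /// not the final outcome. A hedged sketch (not compiled as a doctest; the request id is a
    /// placeholder and should be a UUID, as the `request_id()` documentation below recommends):
    ///
    /// ```ignore
    /// let result = hub.projects().regions_clusters_delete("projectId", "region", "clusterName")
    ///              .request_id("00000000-0000-0000-0000-000000000000")
    ///              .doit();
    /// if let Ok((_response, operation)) = result {
    ///     // Poll this operation until it reports completion before assuming the cluster is gone.
    ///     println!("delete started: {:?}", operation);
    /// }
    /// ```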
pub fn doit(mut self) -> Result<(hyper::client::Response, Operation)> { use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.clusters.delete", http_method: hyper::method::Method::Delete }); let mut params: Vec<(&str, String)> = Vec::with_capacity(7 + self._additional_params.len()); params.push(("projectId", self._project_id.to_string())); params.push(("region", self._region.to_string())); params.push(("clusterName", self._cluster_name.to_string())); if let Some(value) = self._request_id { params.push(("requestId", value.to_string())); } if let Some(value) = self._cluster_uuid { params.push(("clusterUuid", value.to_string())); } for &field in ["alt", "projectId", "region", "clusterName", "requestId", "clusterUuid"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{projectId}", "projectId"), ("{region}", "region"), ("{clusterName}", "clusterName")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(3); for param_name in ["clusterName", "region", "projectId"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Delete, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), 
Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// Required. The ID of the Google Cloud Platform project that the cluster belongs to. /// /// Sets the *project id* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn project_id(mut self, new_value: &str) -> ProjectRegionClusterDeleteCall<'a, C, A> { self._project_id = new_value.to_string(); self } /// Required. The Cloud Dataproc region in which to handle the request. /// /// Sets the *region* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn region(mut self, new_value: &str) -> ProjectRegionClusterDeleteCall<'a, C, A> { self._region = new_value.to_string(); self } /// Required. The cluster name. /// /// Sets the *cluster name* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn cluster_name(mut self, new_value: &str) -> ProjectRegionClusterDeleteCall<'a, C, A> { self._cluster_name = new_value.to_string(); self } /// Optional. A unique id used to identify the request. If the server receives two DeleteClusterRequest requests with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. /// /// Sets the *request id* query property to the given value. pub fn request_id(mut self, new_value: &str) -> ProjectRegionClusterDeleteCall<'a, C, A> { self._request_id = Some(new_value.to_string()); self } /// Optional. Specifying the cluster_uuid means the RPC should fail (with error NOT_FOUND) if cluster with specified UUID does not exist. /// /// Sets the *cluster uuid* query property to the given value. pub fn cluster_uuid(mut self, new_value: &str) -> ProjectRegionClusterDeleteCall<'a, C, A> { self._cluster_uuid = Some(new_value.to_string()); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionClusterDeleteCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. 
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionClusterDeleteCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionClusterDeleteCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Instantiates a template and begins execution.This method is equivalent to executing the sequence CreateWorkflowTemplate, InstantiateWorkflowTemplate, DeleteWorkflowTemplate.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata.On successful completion, Operation.response will be Empty. /// /// A builder for the *regions.workflowTemplates.instantiateInline* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. 
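/// The sketch below illustrates that polling loop; it is not compiled as a doctest, and the
/// `regions_operations_get` method name as well as the `done`/`name` fields on `Operation`
/// are assumptions about the rest of the generated API rather than something defined here.
///
/// ```ignore
/// let (_resp, mut op) = hub.projects()
///     .regions_workflow_templates_instantiate_inline(template, "parent")
///     .doit().unwrap();
/// while op.done != Some(true) {
///     // Back off between polls in real code instead of spinning.
///     let name = op.name.clone().unwrap();
///     op = hub.projects().regions_operations_get(&name).doit().unwrap().1;
/// }
/// ```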
/// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::WorkflowTemplate; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = WorkflowTemplate::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_workflow_templates_instantiate_inline(req, "parent") /// .request_id("duo") /// .doit(); /// # } /// ``` pub struct ProjectRegionWorkflowTemplateInstantiateInlineCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: WorkflowTemplate, _parent: String, _request_id: Option<String>, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionWorkflowTemplateInstantiateInlineCall<'a, C, A> {} impl<'a, C, A> ProjectRegionWorkflowTemplateInstantiateInlineCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
pub fn doit(mut self) -> Result<(hyper::client::Response, Operation)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.workflowTemplates.instantiateInline", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len()); params.push(("parent", self._parent.to_string())); if let Some(value) = self._request_id { params.push(("requestId", value.to_string())); } for &field in ["alt", "parent", "requestId"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+parent}/workflowTemplates:instantiateInline"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+parent}", "parent")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["parent"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let 
oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: WorkflowTemplate) -> ProjectRegionWorkflowTemplateInstantiateInlineCall<'a, C, A> { self._request = new_value; self } /// Required. The "resource name" of the workflow template region, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region} /// /// Sets the *parent* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn parent(mut self, new_value: &str) -> ProjectRegionWorkflowTemplateInstantiateInlineCall<'a, C, A> { self._parent = new_value.to_string(); self } /// Optional. A tag that prevents multiple concurrent workflow instances with the same tag from running. This mitigates risk of concurrent instances started due to retries.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. /// /// Sets the *request id* query property to the given value. pub fn request_id(mut self, new_value: &str) -> ProjectRegionWorkflowTemplateInstantiateInlineCall<'a, C, A> { self._request_id = Some(new_value.to_string()); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionWorkflowTemplateInstantiateInlineCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionWorkflowTemplateInstantiateInlineCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionWorkflowTemplateInstantiateInlineCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Deletes a workflow template. It does not cancel in-progress workflows. /// /// A builder for the *regions.workflowTemplates.delete* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! 
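/// // Note: "name" must be the full workflow template resource name, and version(-5) is only a
/// // placeholder; leaving .version() unset deletes the template regardless of its current version.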
/// let result = hub.projects().regions_workflow_templates_delete("name") /// .version(-5) /// .doit(); /// # } /// ``` pub struct ProjectRegionWorkflowTemplateDeleteCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _name: String, _version: Option<i32>, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionWorkflowTemplateDeleteCall<'a, C, A> {} impl<'a, C, A> ProjectRegionWorkflowTemplateDeleteCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<(hyper::client::Response, Empty)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.workflowTemplates.delete", http_method: hyper::method::Method::Delete }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("name", self._name.to_string())); if let Some(value) = self._version { params.push(("version", value.to_string())); } for &field in ["alt", "name", "version"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+name}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+name}", "name")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["name"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Delete, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { 
sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// Required. The "resource name" of the workflow template, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region}/workflowTemplates/{template_id} /// /// Sets the *name* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn name(mut self, new_value: &str) -> ProjectRegionWorkflowTemplateDeleteCall<'a, C, A> { self._name = new_value.to_string(); self } /// Optional. The version of workflow template to delete. If specified, will only delete the template if the current server version matches specified version. /// /// Sets the *version* query property to the given value. pub fn version(mut self, new_value: i32) -> ProjectRegionWorkflowTemplateDeleteCall<'a, C, A> { self._version = Some(new_value); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionWorkflowTemplateDeleteCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. 
pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionWorkflowTemplateDeleteCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionWorkflowTemplateDeleteCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set. /// /// A builder for the *regions.operations.getIamPolicy* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::GetIamPolicyRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = GetIamPolicyRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! 
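/// // Note: "resource" is a placeholder; pass the full path of the operation whose IAM policy is
/// // requested (see the resource() setter documentation for the appropriate value).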
/// let result = hub.projects().regions_operations_get_iam_policy(req, "resource") /// .doit(); /// # } /// ``` pub struct ProjectRegionOperationGetIamPolicyCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: GetIamPolicyRequest, _resource: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionOperationGetIamPolicyCall<'a, C, A> {} impl<'a, C, A> ProjectRegionOperationGetIamPolicyCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<(hyper::client::Response, Policy)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.operations.getIamPolicy", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("resource", self._resource.to_string())); for &field in ["alt", "resource"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+resource}:getIamPolicy"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+resource}", "resource")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["resource"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) 
.header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: GetIamPolicyRequest) -> ProjectRegionOperationGetIamPolicyCall<'a, C, A> { self._request = new_value; self } /// REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. /// /// Sets the *resource* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn resource(mut self, new_value: &str) -> ProjectRegionOperationGetIamPolicyCall<'a, C, A> { self._resource = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionOperationGetIamPolicyCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. 
Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionOperationGetIamPolicyCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionOperationGetIamPolicyCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Retrieves the latest workflow template.Can retrieve previously instantiated template by specifying optional version parameter. /// /// A builder for the *regions.workflowTemplates.get* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! 
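/// // Note: version(-45) is only a placeholder; leaving .version() unset retrieves the current
/// // version of the template.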
/// let result = hub.projects().regions_workflow_templates_get("name") /// .version(-45) /// .doit(); /// # } /// ``` pub struct ProjectRegionWorkflowTemplateGetCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _name: String, _version: Option<i32>, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionWorkflowTemplateGetCall<'a, C, A> {} impl<'a, C, A> ProjectRegionWorkflowTemplateGetCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<(hyper::client::Response, WorkflowTemplate)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.workflowTemplates.get", http_method: hyper::method::Method::Get }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("name", self._name.to_string())); if let Some(value) = self._version { params.push(("version", value.to_string())); } for &field in ["alt", "name", "version"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+name}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+name}", "name")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["name"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); 
continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// Required. The "resource name" of the workflow template, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region}/workflowTemplates/{template_id} /// /// Sets the *name* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn name(mut self, new_value: &str) -> ProjectRegionWorkflowTemplateGetCall<'a, C, A> { self._name = new_value.to_string(); self } /// Optional. The version of workflow template to retrieve. Only previously instatiated versions can be retrieved.If unspecified, retrieves the current version. /// /// Sets the *version* query property to the given value. pub fn version(mut self, new_value: i32) -> ProjectRegionWorkflowTemplateGetCall<'a, C, A> { self._version = Some(new_value); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionWorkflowTemplateGetCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. 
pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionWorkflowTemplateGetCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionWorkflowTemplateGetCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Creates new workflow template. /// /// A builder for the *regions.workflowTemplates.create* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::WorkflowTemplate; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = WorkflowTemplate::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_workflow_templates_create(req, "parent") /// .doit(); /// # } /// ``` pub struct ProjectRegionWorkflowTemplateCreateCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: WorkflowTemplate, _parent: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionWorkflowTemplateCreateCall<'a, C, A> {} impl<'a, C, A> ProjectRegionWorkflowTemplateCreateCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
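    ///
    /// A minimal, illustrative sketch (not compiled) of consuming the `Result` this method returns;
    /// the `Error` variants matched below are the ones produced by this generated client, while the
    /// handling itself is only an example:
    ///
    /// ```text
    /// match hub.projects().regions_workflow_templates_create(req, "parent").doit() {
    ///     Ok((response, _template)) => println!("created, HTTP status: {}", response.status),
    ///     Err(Error::BadRequest(api_error)) => println!("server rejected the request: {:?}", api_error),
    ///     Err(other) => println!("call failed: {}", other),
    /// }
    /// ```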
pub fn doit(mut self) -> Result<(hyper::client::Response, WorkflowTemplate)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.workflowTemplates.create", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("parent", self._parent.to_string())); for &field in ["alt", "parent"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+parent}/workflowTemplates"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+parent}", "parent")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["parent"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); 
continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: WorkflowTemplate) -> ProjectRegionWorkflowTemplateCreateCall<'a, C, A> { self._request = new_value; self } /// Required. The "resource name" of the region, as described in https://cloud.google.com/apis/design/resource_names of the form projects/{project_id}/regions/{region} /// /// Sets the *parent* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn parent(mut self, new_value: &str) -> ProjectRegionWorkflowTemplateCreateCall<'a, C, A> { self._parent = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionWorkflowTemplateCreateCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionWorkflowTemplateCreateCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. 
/// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionWorkflowTemplateCreateCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Deletes the job from the project. If the job is active, the delete fails, and the response returns FAILED_PRECONDITION. /// /// A builder for the *regions.jobs.delete* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_jobs_delete("projectId", "region", "jobId") /// .doit(); /// # } /// ``` pub struct ProjectRegionJobDeleteCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _project_id: String, _region: String, _job_id: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionJobDeleteCall<'a, C, A> {} impl<'a, C, A> ProjectRegionJobDeleteCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
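    ///
    /// Note (illustrative, not compiled): on success the second tuple element is the generated
    /// `Empty` type, so the useful information is the HTTP response itself:
    ///
    /// ```text
    /// let (response, _empty) = hub.projects()
    ///     .regions_jobs_delete("projectId", "region", "jobId")
    ///     .doit()?;
    /// assert!(response.status.is_success());
    /// ```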
pub fn doit(mut self) -> Result<(hyper::client::Response, Empty)> { use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.jobs.delete", http_method: hyper::method::Method::Delete }); let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len()); params.push(("projectId", self._project_id.to_string())); params.push(("region", self._region.to_string())); params.push(("jobId", self._job_id.to_string())); for &field in ["alt", "projectId", "region", "jobId"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}/regions/{region}/jobs/{jobId}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{projectId}", "projectId"), ("{region}", "region"), ("{jobId}", "jobId")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(3); for param_name in ["jobId", "region", "projectId"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Delete, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// Required. The ID of the Google Cloud Platform project that the job belongs to. 
/// /// Sets the *project id* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn project_id(mut self, new_value: &str) -> ProjectRegionJobDeleteCall<'a, C, A> { self._project_id = new_value.to_string(); self } /// Required. The Cloud Dataproc region in which to handle the request. /// /// Sets the *region* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn region(mut self, new_value: &str) -> ProjectRegionJobDeleteCall<'a, C, A> { self._region = new_value.to_string(); self } /// Required. The job ID. /// /// Sets the *job id* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn job_id(mut self, new_value: &str) -> ProjectRegionJobDeleteCall<'a, C, A> { self._job_id = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionJobDeleteCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionJobDeleteCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. 
/// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionJobDeleteCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Sets the access control policy on the specified resource. Replaces any existing policy. /// /// A builder for the *regions.jobs.setIamPolicy* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::SetIamPolicyRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = SetIamPolicyRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_jobs_set_iam_policy(req, "resource") /// .doit(); /// # } /// ``` pub struct ProjectRegionJobSetIamPolicyCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: SetIamPolicyRequest, _resource: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionJobSetIamPolicyCall<'a, C, A> {} impl<'a, C, A> ProjectRegionJobSetIamPolicyCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
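    ///
    /// Note: as the request loop below shows, a failed attempt is re-sent only when the configured
    /// `Delegate` returns `oauth2::Retry::After(..)` from its `http_error` or `http_failure` hook;
    /// otherwise the error is returned to the caller immediately.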
pub fn doit(mut self) -> Result<(hyper::client::Response, Policy)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.jobs.setIamPolicy", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("resource", self._resource.to_string())); for &field in ["alt", "resource"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/{+resource}:setIamPolicy"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+resource}", "resource")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["resource"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); 
continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: SetIamPolicyRequest) -> ProjectRegionJobSetIamPolicyCall<'a, C, A> { self._request = new_value; self } /// REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. /// /// Sets the *resource* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn resource(mut self, new_value: &str) -> ProjectRegionJobSetIamPolicyCall<'a, C, A> { self._resource = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionJobSetIamPolicyCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionJobSetIamPolicyCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. 
/// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionJobSetIamPolicyCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Starts a job cancellation request. To access the job resource after cancellation, call regions/{region}/jobs.list or regions/{region}/jobs.get. /// /// A builder for the *regions.jobs.cancel* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_dataproc1 as dataproc1; /// use dataproc1::CancelJobRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use dataproc1::Dataproc; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = Dataproc::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = CancelJobRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().regions_jobs_cancel(req, "projectId", "region", "jobId") /// .doit(); /// # } /// ``` pub struct ProjectRegionJobCancelCall<'a, C, A> where C: 'a, A: 'a { hub: &'a Dataproc<C, A>, _request: CancelJobRequest, _project_id: String, _region: String, _job_id: String, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for ProjectRegionJobCancelCall<'a, C, A> {} impl<'a, C, A> ProjectRegionJobCancelCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
pub fn doit(mut self) -> Result<(hyper::client::Response, Job)> { use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "dataproc.projects.regions.jobs.cancel", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len()); params.push(("projectId", self._project_id.to_string())); params.push(("region", self._region.to_string())); params.push(("jobId", self._job_id.to_string())); for &field in ["alt", "projectId", "region", "jobId"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{projectId}", "projectId"), ("{region}", "region"), ("{jobId}", "jobId")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(3); for param_name in ["jobId", "region", "projectId"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, 
json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: CancelJobRequest) -> ProjectRegionJobCancelCall<'a, C, A> { self._request = new_value; self } /// Required. The ID of the Google Cloud Platform project that the job belongs to. /// /// Sets the *project id* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn project_id(mut self, new_value: &str) -> ProjectRegionJobCancelCall<'a, C, A> { self._project_id = new_value.to_string(); self } /// Required. The Cloud Dataproc region in which to handle the request. /// /// Sets the *region* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn region(mut self, new_value: &str) -> ProjectRegionJobCancelCall<'a, C, A> { self._region = new_value.to_string(); self } /// Required. The job ID. /// /// Sets the *job id* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn job_id(mut self, new_value: &str) -> ProjectRegionJobCancelCall<'a, C, A> { self._job_id = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> ProjectRegionJobCancelCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *access_token* (query-string) - OAuth access token. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. 
/// * *callback* (query-string) - JSONP /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for response. /// * *$.xgafv* (query-string) - V1 error format. pub fn param<T>(mut self, name: T, value: T) -> ProjectRegionJobCancelCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRegionJobCancelCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } }
job_id
job6.py
""" #Trains a ResNet on the CIFAR10 dataset. """ from __future__ import print_function import keras from keras.layers import Dense, Conv2D, BatchNormalization, Activation from keras.layers import AveragePooling2D, Input, Flatten from keras.optimizers import Adam from keras.callbacks import ModelCheckpoint, LearningRateScheduler from keras.callbacks import ReduceLROnPlateau, TensorBoard from keras.preprocessing.image import ImageDataGenerator from keras.regularizers import l2 from keras import backend as K from keras.models import Model from keras.datasets import cifar10 from keras.applications.vgg16 import VGG16 from keras.applications.vgg19 import VGG19 from keras import models, layers, optimizers from datetime import datetime import tensorflow as tf import numpy as np import os import pdb import sys import argparse import time import signal import glob import json import send_signal load_start = time.time() parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training') parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name') parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint') parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use') parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)') parser.set_defaults(resume=False) args = parser.parse_args() os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num # Training parameters batch_size = 256 args_lr = 0.005 args_model = 'vgg16' epoch_begin_time = 0 job_name = sys.argv[0].split('.')[0] save_files = '/scratch/li.baol/checkpoint_test/' + job_name + '*' total_epochs = 9 starting_epoch = 0 if args.resume: save_file = glob.glob(save_files)[0] # epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0]) starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1]) data_augmentation = True num_classes = 10 # Subtracting pixel mean improves accuracy subtract_pixel_mean = True n = 3 # Model name, depth and version model_type = args.tc #'P100_resnet50_he_256_1' # Load the CIFAR10 data. (x_train, y_train), (x_test, y_test) = cifar10.load_data() # Normalize data. x_train = x_train.astype('float32') / 255 x_test = x_test.astype('float32') / 255 # If subtract pixel mean is enabled if subtract_pixel_mean: x_train_mean = np.mean(x_train, axis=0) x_train -= x_train_mean x_test -= x_train_mean print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') print('y_train shape:', y_train.shape) # Convert class vectors to binary class matrices. 
y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) if args.resume: print('resume from checkpoint') model = keras.models.load_model(save_file) else: print('train from start') model = models.Sequential() if '16' in args_model: base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None) elif '19' in args_model: base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None) #base_model.summary() #pdb.set_trace() model.add(base_model) model.add(layers.Flatten()) model.add(layers.BatchNormalization()) model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform')) #model.add(layers.Dropout(0.2)) model.add(layers.BatchNormalization()) model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform')) #model.add(layers.Dropout(0.2)) model.add(layers.BatchNormalization()) model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform')) model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=args_lr), metrics=['accuracy']) #model.summary() print(model_type) #pdb.set_trace() current_epoch = 0 ################### connects interrupt signal to the process ##################### def terminateProcess():
signal.signal(signal.SIGTERM, terminateProcess)

#################################################################################

logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name

tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')

class PrintEpoch(keras.callbacks.Callback):
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch
        #remaining_epochs = epochs - epoch
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
        global epoch_begin_time
        epoch_begin_time = time.time()

my_callback = PrintEpoch()

callbacks = [tensorboard_callback, my_callback]

load_time = int(time.time() - load_start)
if args.resume:
    message = job_name + ' load ' + str(load_time)
    send_signal.send(args.node, 10002, message)

    # Score trained model.
    scores = model.evaluate(x_test, y_test, verbose=1)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])
    # send signal to indicate job has finished
    message = job_name + ' finish'
    send_signal.send(args.node, 10002, message)
    sys.exit()

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=1,
          validation_data=(x_test, y_test),
          shuffle=True,
          callbacks=callbacks,
          initial_epoch=starting_epoch,
          verbose=1)

if not args.resume:
    terminateProcess()
    save_start = time.time()
    # first record the wasted epoch time
    global epoch_begin_time
    if epoch_begin_time == 0:
        epoch_waste_time = 0
    else:
        epoch_waste_time = int(time.time() - epoch_begin_time)

    print('checkpointing the model triggered by kill -15 signal')
    # delete whatever checkpoint that already exists
    for f in glob.glob(save_files):
        os.remove(f)
    model.save('/scratch/li.baol/checkpoint_test/' + job_name + '_' + str(current_epoch) + '.h5')
    print('(SIGTERM) terminating the process')

    save_time = int(time.time() - save_start)
    message = job_name + ' save ' + str(save_time)
    send_signal.send(args.node, 10002, message)
    sys.exit()
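The job6.py row above implements a checkpoint-on-SIGTERM / resume pattern: the handler saves the model to an .h5 file and exits, and a later launch with --resume reloads that file. Below is a minimal sketch of the same idea for reference; the save_path value, the build_model() contents, and the handler name are illustrative assumptions, not taken from the original script.

# Sketch only: save a Keras model on SIGTERM and resume from the checkpoint on restart.
# save_path and build_model() are placeholders for this example, not from job6.py.
import glob
import signal
import sys

import keras
from keras import layers

save_path = '/tmp/checkpoint_example.h5'  # assumed location

def build_model():
    model = keras.Sequential([layers.Dense(10, activation='softmax', input_shape=(32,))])
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model

# Resume from an existing checkpoint if one is present, otherwise start fresh.
model = keras.models.load_model(save_path) if glob.glob(save_path) else build_model()

def handle_sigterm(signum, frame):
    model.save(save_path)  # persists architecture, weights and optimizer state
    sys.exit(0)            # exit cleanly so the scheduler can relaunch the job

signal.signal(signal.SIGTERM, handle_sigterm)
# model.fit(...) would run here; on SIGTERM the handler saves and exits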
l2vpn_atom_pwr_summary.pb.go
/* Copyright 2019 Cisco Systems Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by protoc-gen-go. DO NOT EDIT. // source: l2vpn_atom_pwr_summary.proto package cisco_ios_xr_l2vpn_oper_l2vpnv2_active_pwr_summary import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type L2VpnAtomPwrSummary_KEYS struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *L2VpnAtomPwrSummary_KEYS) Reset() { *m = L2VpnAtomPwrSummary_KEYS{} } func (m *L2VpnAtomPwrSummary_KEYS) String() string { return proto.CompactTextString(m) } func (*L2VpnAtomPwrSummary_KEYS) ProtoMessage() {} func (*L2VpnAtomPwrSummary_KEYS) Descriptor() ([]byte, []int) { return fileDescriptor_0a6e1ee7d75ddfe3, []int{0} } func (m *L2VpnAtomPwrSummary_KEYS) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_L2VpnAtomPwrSummary_KEYS.Unmarshal(m, b) } func (m *L2VpnAtomPwrSummary_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_L2VpnAtomPwrSummary_KEYS.Marshal(b, m, deterministic) } func (m *L2VpnAtomPwrSummary_KEYS) XXX_Merge(src proto.Message) { xxx_messageInfo_L2VpnAtomPwrSummary_KEYS.Merge(m, src) } func (m *L2VpnAtomPwrSummary_KEYS) XXX_Size() int { return xxx_messageInfo_L2VpnAtomPwrSummary_KEYS.Size(m) } func (m *L2VpnAtomPwrSummary_KEYS) XXX_DiscardUnknown() { xxx_messageInfo_L2VpnAtomPwrSummary_KEYS.DiscardUnknown(m) } var xxx_messageInfo_L2VpnAtomPwrSummary_KEYS proto.InternalMessageInfo type L2VpnRdAuto struct { RouterId string `protobuf:"bytes,1,opt,name=router_id,json=routerId,proto3" json:"router_id,omitempty"` AutoIndex uint32 `protobuf:"varint,2,opt,name=auto_index,json=autoIndex,proto3" json:"auto_index,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *L2VpnRdAuto) Reset() { *m = L2VpnRdAuto{} } func (m *L2VpnRdAuto) String() string { return proto.CompactTextString(m) } func (*L2VpnRdAuto) ProtoMessage() {} func (*L2VpnRdAuto) Descriptor() ([]byte, []int) { return fileDescriptor_0a6e1ee7d75ddfe3, []int{1} } func (m *L2VpnRdAuto) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_L2VpnRdAuto.Unmarshal(m, b) } func (m *L2VpnRdAuto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_L2VpnRdAuto.Marshal(b, m, deterministic) } func (m *L2VpnRdAuto) XXX_Merge(src proto.Message) { xxx_messageInfo_L2VpnRdAuto.Merge(m, src) } func (m *L2VpnRdAuto) XXX_Size() int { return xxx_messageInfo_L2VpnRdAuto.Size(m) } func (m *L2VpnRdAuto) XXX_DiscardUnknown() { 
xxx_messageInfo_L2VpnRdAuto.DiscardUnknown(m) } var xxx_messageInfo_L2VpnRdAuto proto.InternalMessageInfo func (m *L2VpnRdAuto) GetRouterId() string { if m != nil { return m.RouterId } return "" } func (m *L2VpnRdAuto) GetAutoIndex() uint32 { if m != nil { return m.AutoIndex } return 0 } type L2VpnRd_2ByteAs struct { TwoByteAs uint32 `protobuf:"varint,1,opt,name=two_byte_as,json=twoByteAs,proto3" json:"two_byte_as,omitempty"` FourByteIndex uint32 `protobuf:"varint,2,opt,name=four_byte_index,json=fourByteIndex,proto3" json:"four_byte_index,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *L2VpnRd_2ByteAs) Reset() { *m = L2VpnRd_2ByteAs{} } func (m *L2VpnRd_2ByteAs) String() string { return proto.CompactTextString(m) } func (*L2VpnRd_2ByteAs) ProtoMessage() {} func (*L2VpnRd_2ByteAs) Descriptor() ([]byte, []int) { return fileDescriptor_0a6e1ee7d75ddfe3, []int{2} } func (m *L2VpnRd_2ByteAs) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_L2VpnRd_2ByteAs.Unmarshal(m, b) } func (m *L2VpnRd_2ByteAs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_L2VpnRd_2ByteAs.Marshal(b, m, deterministic) } func (m *L2VpnRd_2ByteAs) XXX_Merge(src proto.Message) { xxx_messageInfo_L2VpnRd_2ByteAs.Merge(m, src) } func (m *L2VpnRd_2ByteAs) XXX_Size() int { return xxx_messageInfo_L2VpnRd_2ByteAs.Size(m) } func (m *L2VpnRd_2ByteAs) XXX_DiscardUnknown() { xxx_messageInfo_L2VpnRd_2ByteAs.DiscardUnknown(m) } var xxx_messageInfo_L2VpnRd_2ByteAs proto.InternalMessageInfo func (m *L2VpnRd_2ByteAs) GetTwoByteAs() uint32 { if m != nil { return m.TwoByteAs } return 0 } func (m *L2VpnRd_2ByteAs) GetFourByteIndex() uint32 { if m != nil
return 0 } type L2VpnRd_4ByteAs struct { FourByteAs uint32 `protobuf:"varint,1,opt,name=four_byte_as,json=fourByteAs,proto3" json:"four_byte_as,omitempty"` TwoByteIndex uint32 `protobuf:"varint,2,opt,name=two_byte_index,json=twoByteIndex,proto3" json:"two_byte_index,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *L2VpnRd_4ByteAs) Reset() { *m = L2VpnRd_4ByteAs{} } func (m *L2VpnRd_4ByteAs) String() string { return proto.CompactTextString(m) } func (*L2VpnRd_4ByteAs) ProtoMessage() {} func (*L2VpnRd_4ByteAs) Descriptor() ([]byte, []int) { return fileDescriptor_0a6e1ee7d75ddfe3, []int{3} } func (m *L2VpnRd_4ByteAs) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_L2VpnRd_4ByteAs.Unmarshal(m, b) } func (m *L2VpnRd_4ByteAs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_L2VpnRd_4ByteAs.Marshal(b, m, deterministic) } func (m *L2VpnRd_4ByteAs) XXX_Merge(src proto.Message) { xxx_messageInfo_L2VpnRd_4ByteAs.Merge(m, src) } func (m *L2VpnRd_4ByteAs) XXX_Size() int { return xxx_messageInfo_L2VpnRd_4ByteAs.Size(m) } func (m *L2VpnRd_4ByteAs) XXX_DiscardUnknown() { xxx_messageInfo_L2VpnRd_4ByteAs.DiscardUnknown(m) } var xxx_messageInfo_L2VpnRd_4ByteAs proto.InternalMessageInfo func (m *L2VpnRd_4ByteAs) GetFourByteAs() uint32 { if m != nil { return m.FourByteAs } return 0 } func (m *L2VpnRd_4ByteAs) GetTwoByteIndex() uint32 { if m != nil { return m.TwoByteIndex } return 0 } type L2VpnRdV4Addr struct { Ipv4Address string `protobuf:"bytes,1,opt,name=ipv4_address,json=ipv4Address,proto3" json:"ipv4_address,omitempty"` TwoByteIndex uint32 `protobuf:"varint,2,opt,name=two_byte_index,json=twoByteIndex,proto3" json:"two_byte_index,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *L2VpnRdV4Addr) Reset() { *m = L2VpnRdV4Addr{} } func (m *L2VpnRdV4Addr) String() string { return proto.CompactTextString(m) } func (*L2VpnRdV4Addr) ProtoMessage() {} func (*L2VpnRdV4Addr) Descriptor() ([]byte, []int) { return fileDescriptor_0a6e1ee7d75ddfe3, []int{4} } func (m *L2VpnRdV4Addr) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_L2VpnRdV4Addr.Unmarshal(m, b) } func (m *L2VpnRdV4Addr) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_L2VpnRdV4Addr.Marshal(b, m, deterministic) } func (m *L2VpnRdV4Addr) XXX_Merge(src proto.Message) { xxx_messageInfo_L2VpnRdV4Addr.Merge(m, src) } func (m *L2VpnRdV4Addr) XXX_Size() int { return xxx_messageInfo_L2VpnRdV4Addr.Size(m) } func (m *L2VpnRdV4Addr) XXX_DiscardUnknown() { xxx_messageInfo_L2VpnRdV4Addr.DiscardUnknown(m) } var xxx_messageInfo_L2VpnRdV4Addr proto.InternalMessageInfo func (m *L2VpnRdV4Addr) GetIpv4Address() string { if m != nil { return m.Ipv4Address } return "" } func (m *L2VpnRdV4Addr) GetTwoByteIndex() uint32 { if m != nil { return m.TwoByteIndex } return 0 } type L2VpnRd struct { Rd string `protobuf:"bytes,1,opt,name=rd,proto3" json:"rd,omitempty"` Auto *L2VpnRdAuto `protobuf:"bytes,2,opt,name=auto,proto3" json:"auto,omitempty"` TwoByteAs *L2VpnRd_2ByteAs `protobuf:"bytes,3,opt,name=two_byte_as,json=twoByteAs,proto3" json:"two_byte_as,omitempty"` FourByteAs *L2VpnRd_4ByteAs `protobuf:"bytes,4,opt,name=four_byte_as,json=fourByteAs,proto3" json:"four_byte_as,omitempty"` V4Addr *L2VpnRdV4Addr `protobuf:"bytes,5,opt,name=v4_addr,json=v4Addr,proto3" json:"v4_addr,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized 
[]byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *L2VpnRd) Reset() { *m = L2VpnRd{} } func (m *L2VpnRd) String() string { return proto.CompactTextString(m) } func (*L2VpnRd) ProtoMessage() {} func (*L2VpnRd) Descriptor() ([]byte, []int) { return fileDescriptor_0a6e1ee7d75ddfe3, []int{5} } func (m *L2VpnRd) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_L2VpnRd.Unmarshal(m, b) } func (m *L2VpnRd) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_L2VpnRd.Marshal(b, m, deterministic) } func (m *L2VpnRd) XXX_Merge(src proto.Message) { xxx_messageInfo_L2VpnRd.Merge(m, src) } func (m *L2VpnRd) XXX_Size() int { return xxx_messageInfo_L2VpnRd.Size(m) } func (m *L2VpnRd) XXX_DiscardUnknown() { xxx_messageInfo_L2VpnRd.DiscardUnknown(m) } var xxx_messageInfo_L2VpnRd proto.InternalMessageInfo func (m *L2VpnRd) GetRd() string { if m != nil { return m.Rd } return "" } func (m *L2VpnRd) GetAuto() *L2VpnRdAuto { if m != nil { return m.Auto } return nil } func (m *L2VpnRd) GetTwoByteAs() *L2VpnRd_2ByteAs { if m != nil { return m.TwoByteAs } return nil } func (m *L2VpnRd) GetFourByteAs() *L2VpnRd_4ByteAs { if m != nil { return m.FourByteAs } return nil } func (m *L2VpnRd) GetV4Addr() *L2VpnRdV4Addr { if m != nil { return m.V4Addr } return nil } type L2VpnAtomPwrSummary struct { BgpRouterId string `protobuf:"bytes,50,opt,name=bgp_router_id,json=bgpRouterId,proto3" json:"bgp_router_id,omitempty"` CfgRouterId string `protobuf:"bytes,51,opt,name=cfg_router_id,json=cfgRouterId,proto3" json:"cfg_router_id,omitempty"` BgpAs uint32 `protobuf:"varint,52,opt,name=bgp_as,json=bgpAs,proto3" json:"bgp_as,omitempty"` CfgGlobalId uint32 `protobuf:"varint,53,opt,name=cfg_global_id,json=cfgGlobalId,proto3" json:"cfg_global_id,omitempty"` RdAuto *L2VpnRd `protobuf:"bytes,54,opt,name=rd_auto,json=rdAuto,proto3" json:"rd_auto,omitempty"` RdConfigured *L2VpnRd `protobuf:"bytes,55,opt,name=rd_configured,json=rdConfigured,proto3" json:"rd_configured,omitempty"` L2VpnHasBgpEod bool `protobuf:"varint,56,opt,name=l2vpn_has_bgp_eod,json=l2vpnHasBgpEod,proto3" json:"l2vpn_has_bgp_eod,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *L2VpnAtomPwrSummary) Reset() { *m = L2VpnAtomPwrSummary{} } func (m *L2VpnAtomPwrSummary) String() string { return proto.CompactTextString(m) } func (*L2VpnAtomPwrSummary) ProtoMessage() {} func (*L2VpnAtomPwrSummary) Descriptor() ([]byte, []int) { return fileDescriptor_0a6e1ee7d75ddfe3, []int{6} } func (m *L2VpnAtomPwrSummary) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_L2VpnAtomPwrSummary.Unmarshal(m, b) } func (m *L2VpnAtomPwrSummary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_L2VpnAtomPwrSummary.Marshal(b, m, deterministic) } func (m *L2VpnAtomPwrSummary) XXX_Merge(src proto.Message) { xxx_messageInfo_L2VpnAtomPwrSummary.Merge(m, src) } func (m *L2VpnAtomPwrSummary) XXX_Size() int { return xxx_messageInfo_L2VpnAtomPwrSummary.Size(m) } func (m *L2VpnAtomPwrSummary) XXX_DiscardUnknown() { xxx_messageInfo_L2VpnAtomPwrSummary.DiscardUnknown(m) } var xxx_messageInfo_L2VpnAtomPwrSummary proto.InternalMessageInfo func (m *L2VpnAtomPwrSummary) GetBgpRouterId() string { if m != nil { return m.BgpRouterId } return "" } func (m *L2VpnAtomPwrSummary) GetCfgRouterId() string { if m != nil { return m.CfgRouterId } return "" } func (m *L2VpnAtomPwrSummary) GetBgpAs() uint32 { if m != nil { return m.BgpAs } return 0 } func (m 
*L2VpnAtomPwrSummary) GetCfgGlobalId() uint32 { if m != nil { return m.CfgGlobalId } return 0 } func (m *L2VpnAtomPwrSummary) GetRdAuto() *L2VpnRd { if m != nil { return m.RdAuto } return nil } func (m *L2VpnAtomPwrSummary) GetRdConfigured() *L2VpnRd { if m != nil { return m.RdConfigured } return nil } func (m *L2VpnAtomPwrSummary) GetL2VpnHasBgpEod() bool { if m != nil { return m.L2VpnHasBgpEod } return false } func init() { proto.RegisterType((*L2VpnAtomPwrSummary_KEYS)(nil), "cisco_ios_xr_l2vpn_oper.l2vpnv2.active.pwr.summary.l2vpn_atom_pwr_summary_KEYS") proto.RegisterType((*L2VpnRdAuto)(nil), "cisco_ios_xr_l2vpn_oper.l2vpnv2.active.pwr.summary.l2vpn_rd_auto") proto.RegisterType((*L2VpnRd_2ByteAs)(nil), "cisco_ios_xr_l2vpn_oper.l2vpnv2.active.pwr.summary.l2vpn_rd_2byte_as") proto.RegisterType((*L2VpnRd_4ByteAs)(nil), "cisco_ios_xr_l2vpn_oper.l2vpnv2.active.pwr.summary.l2vpn_rd_4byte_as") proto.RegisterType((*L2VpnRdV4Addr)(nil), "cisco_ios_xr_l2vpn_oper.l2vpnv2.active.pwr.summary.l2vpn_rd_v4addr") proto.RegisterType((*L2VpnRd)(nil), "cisco_ios_xr_l2vpn_oper.l2vpnv2.active.pwr.summary.l2vpn_rd") proto.RegisterType((*L2VpnAtomPwrSummary)(nil), "cisco_ios_xr_l2vpn_oper.l2vpnv2.active.pwr.summary.l2vpn_atom_pwr_summary") } func init() { proto.RegisterFile("l2vpn_atom_pwr_summary.proto", fileDescriptor_0a6e1ee7d75ddfe3) } var fileDescriptor_0a6e1ee7d75ddfe3 = []byte{ // 504 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0x4d, 0x8f, 0xda, 0x30, 0x10, 0x86, 0x05, 0xec, 0xb2, 0x30, 0x10, 0x56, 0xb5, 0xd4, 0x2a, 0xd2, 0x76, 0x2b, 0x1a, 0x55, 0x15, 0xbd, 0xe4, 0x90, 0xa5, 0x1f, 0x87, 0x5e, 0xb2, 0x2b, 0xd4, 0xa2, 0xbd, 0xa5, 0xea, 0xa1, 0x1f, 0x92, 0xe5, 0xc4, 0x26, 0x8d, 0x04, 0xd8, 0xb2, 0x93, 0xb0, 0xdc, 0xfa, 0x4f, 0xfa, 0x57, 0x2b, 0xdb, 0x49, 0x16, 0xda, 0x3d, 0xb4, 0x82, 0x1b, 0x1e, 0xcf, 0xfb, 0x0c, 0x9e, 0x77, 0x26, 0xf0, 0x74, 0x19, 0x94, 0x62, 0x8d, 0x49, 0xce, 0x57, 0x58, 0x6c, 0x24, 0x56, 0xc5, 0x6a, 0x45, 0xe4, 0xd6, 0x17, 0x92, 0xe7, 0x1c, 0x05, 0x49, 0xa6, 0x12, 0x8e, 0x33, 0xae, 0xf0, 0x9d, 0xc4, 0x36, 0x95, 0x0b, 0x26, 0x7d, 0xf3, 0xb3, 0x0c, 0x7c, 0x92, 0xe4, 0x59, 0xc9, 0x7c, 0xb1, 0x91, 0x7e, 0xa5, 0xf4, 0x2e, 0xe1, 0xe2, 0x61, 0x26, 0xbe, 0x9d, 0x7d, 0xf9, 0xe4, 0xdd, 0x82, 0x63, 0xaf, 0x25, 0xc5, 0xa4, 0xc8, 0x39, 0xba, 0x80, 0xbe, 0xe4, 0x45, 0xce, 0x24, 0xce, 0xa8, 0xdb, 0x1a, 0xb7, 0x26, 0xfd, 0xa8, 0x67, 0x03, 0x73, 0x8a, 0x2e, 0x01, 0x74, 0x12, 0xce, 0xd6, 0x94, 0xdd, 0xb9, 0xed, 0x71, 0x6b, 0xe2, 0x44, 0x7d, 0x1d, 0x99, 0xeb, 0x80, 0xf7, 0x0d, 0x1e, 0x35, 0xb0, 0x20, 0xde, 0xe6, 0x0c, 0x13, 0x85, 0x9e, 0xc1, 0x20, 0xdf, 0x70, 0x5c, 0x1d, 0x0d, 0xd2, 0x89, 0xfa, 0xf9, 0x86, 0x5f, 0x6f, 0x73, 0x16, 0x2a, 0xf4, 0x12, 0xce, 0x17, 0xbc, 0x90, 0x36, 0x61, 0x17, 0xec, 0xe8, 0xb0, 0x4e, 0xfa, 0x1b, 0x3e, 0xad, 0xe1, 0x63, 0x18, 0xde, 0x8b, 0x1b, 0x3a, 0xd4, 0xca, 0x50, 0xa1, 0x17, 0x30, 0x6a, 0xca, 0xef, 0xd2, 0x87, 0xd5, 0x3f, 0xb0, 0xf0, 0xaf, 0x70, 0xde, 0xc0, 0xcb, 0x29, 0xa1, 0x54, 0xa2, 0xe7, 0x30, 0xcc, 0x44, 0x39, 0xc5, 0xfa, 0xc0, 0x94, 0xaa, 0x7a, 0x31, 0xd0, 0xb1, 0xd0, 0x86, 0xfe, 0x91, 0xfd, 0xab, 0x03, 0xbd, 0x1a, 0x8e, 0x46, 0xd0, 0x96, 0x75, 0x5f, 0xdb, 0x92, 0xa2, 0xcf, 0x70, 0xa2, 0xfb, 0x67, 0x84, 0x83, 0x20, 0xf4, 0xff, 0xdf, 0x61, 0x7f, 0xcf, 0xbf, 0xc8, 0xe0, 0x10, 0xdb, 0x6f, 0x7a, 0xc7, 0xd0, 0x67, 0x07, 0xd1, 0x6b, 0x43, 0x77, 0xbd, 0x4b, 0xff, 0x68, 0xff, 0xc9, 0x11, 0xea, 0xd4, 0xde, 0xee, 0xb9, 0xf8, 0x1d, 0xce, 0x2a, 0x2b, 0xdc, 0x53, 0x53, 0xe3, 0xe6, 0xa0, 0x1a, 0xd6, 0xe2, 0xa8, 
0x6b, 0xad, 0xf4, 0x7e, 0x76, 0xe0, 0xc9, 0xc3, 0x4b, 0x82, 0x3c, 0x70, 0xe2, 0x54, 0xe0, 0xfb, 0x95, 0x08, 0xec, 0x18, 0xc4, 0xa9, 0x88, 0xea, 0xad, 0xf0, 0xc0, 0x49, 0x16, 0xe9, 0x4e, 0xce, 0x95, 0xcd, 0x49, 0x16, 0x69, 0x93, 0xf3, 0x18, 0xba, 0x9a, 0x43, 0x94, 0x3b, 0x35, 0x23, 0x72, 0x1a, 0xa7, 0x22, 0x54, 0xb5, 0x34, 0x5d, 0xf2, 0x98, 0x2c, 0xb5, 0xf4, 0xb5, 0xb9, 0xd5, 0xd2, 0x0f, 0x26, 0x36, 0xd7, 0x23, 0x72, 0x56, 0x99, 0xeb, 0xbe, 0x31, 0x6f, 0x7f, 0x7f, 0xc8, 0xdb, 0xa3, 0xae, 0xa4, 0xa1, 0x1e, 0x11, 0x02, 0x8e, 0xa4, 0x38, 0xe1, 0xeb, 0x45, 0x96, 0x16, 0x92, 0x51, 0xf7, 0xed, 0x11, 0xe0, 0x43, 0x49, 0x6f, 0x1a, 0x22, 0x7a, 0x55, 0xaf, 0xec, 0x0f, 0xa2, 0xb0, 0x7e, 0x3e, 0xe3, 0xd4, 0x7d, 0x37, 0x6e, 0x4d, 0x7a, 0xd1, 0xc8, 0x5c, 0x7c, 0x24, 0xea, 0x3a, 0x15, 0x33, 0x4e, 0xe3, 0xae, 0xf9, 0xc2, 0x5d, 0xfd, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x63, 0x45, 0xe1, 0xe3, 0x01, 0x05, 0x00, 0x00, }
{ return m.FourByteIndex }
sample.py
from pillowtop.logger import pillow_logging
from pillowtop.processors.interface import BulkPillowProcessor, PillowProcessor


class NoopProcessor(PillowProcessor):
class LoggingProcessor(PillowProcessor):
    """
    Processor that just logs things - useful in tests or debugging.
    """

    def __init__(self, logger=None):
        self.logger = logger or pillow_logging

    def process_change(self, change):
        self.logger.info(change)


class CountingProcessor(PillowProcessor):
    """
    Processor that just counts how many things it has processed
    """

    def __init__(self):
        self.count = 0

    def process_change(self, change):
        self.count += 1


class TestProcessor(PillowProcessor):
    """
    Processor that just keeps the change in an in-memory list for testing
    """

    def __init__(self):
        self.changes_seen = []

    def process_change(self, change):
        self.changes_seen.append(change)

    def reset(self):
        self.changes_seen = []


class ChunkedCountProcessor(BulkPillowProcessor):

    def __init__(self):
        self.count = 0

    def process_change(self, change):
        self.count += 1

    def process_changes_chunk(self, changes_chunk):
        self.count += len(changes_chunk)
        return [], []
""" Processor that does absolutely nothing. """ def process_change(self, change): pass
StringSplitAndJoin.py
def
(line):
    new = line.split(" ")
    return "-".join(new)


if __name__ == '__main__':
    line = input()
    result = split_and_join(line)
    print(result)
split_and_join
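For readability, reassembling this row in source order (prefix, then the middle shown above, then the suffix) gives the complete file; this is only a re-join of the cells already shown, with no new logic added.

def split_and_join(line):
    new = line.split(" ")
    return "-".join(new)


if __name__ == '__main__':
    line = input()
    result = split_and_join(line)
    print(result)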