hexsha
stringlengths 40
40
| size
int64 4
1.05M
| content
stringlengths 4
1.05M
| avg_line_length
float64 1.33
100
| max_line_length
int64 1
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
08108d8cdd063ce0c5e9f0887f7bce1ed2a3533e | 973 | use std::env;
use std::io::{self};
/// Entry point: reads `n` from the first CLI argument, or interactively
/// from stdin when no argument is given, then prints the n-th Fibonacci
/// number.
fn main() {
    println!("Welcome to fib, an enterprise ready fibonacci calculator");
    let cmd_line: Vec<String> = env::args().collect();
    // Bind `n` from a single expression instead of declare-then-assign;
    // `expect` replaces the bare `unwrap` so a bad input produces a
    // readable message instead of a raw panic.
    let n: i128 = if cmd_line.len() == 1 {
        println!("input a number...");
        let buf = String::new();
        get_input(buf)
            .trim()
            .parse()
            .expect("input was not a valid integer")
    } else {
        cmd_line[1]
            .trim()
            .parse()
            .expect("argument was not a valid integer")
    };
    println!("Now calculating fib for n={}", n);
    let ans = calc_fib(n);
    println!("ans={}", ans);
}
/// Appends one line read from standard input to `buffer` and returns the
/// buffer. Panics with "Failed" if reading from stdin fails.
fn get_input(mut buffer: String) -> String {
    // Read directly off the stdin handle; no need for a named binding.
    io::stdin().read_line(&mut buffer).expect("Failed");
    buffer
}
/// Returns the `n`-th Fibonacci number, 1-indexed over the sequence
/// 0, 1, 1, 2, 3, 5, ... (so `calc_fib(1) == 0`, `calc_fib(10) == 34`).
///
/// The previous implementation materialised the whole sequence in a
/// `Vec`, panicked on index underflow for `n == 0`, and cast negative
/// `n` to an enormous `usize` length. This version keeps only two
/// rolling values (O(1) space), produces identical results for all
/// `n >= 1`, and returns 0 for `n <= 0` instead of panicking.
fn calc_fib(n: i128) -> i128 {
    if n <= 0 {
        return 0;
    }
    // `prev` holds the value to report for the current index; `curr`
    // is one step ahead, matching the original's fib[sz - 1] result.
    let (mut prev, mut curr) = (0i128, 1i128);
    for _ in 1..n {
        let next = prev + curr;
        prev = curr;
        curr = next;
    }
    prev
}
5d4699158a7fb02fc68d7e271f912bf503d125ae | 363 | use std::path::PathBuf;
use crate::file::File;
/// Outcome of resolving a requested path to a file.
#[derive(Debug)]
pub enum Target<'o> {
    /// The lookup succeeded; carries the resolved [`File`].
    Found(File<'o>),
    /// Nothing was found; the requested path is kept for reporting.
    Missing(&'o PathBuf),
}
impl<'o> Target<'o> {
    /// Resolves `name` via [`File::find`]: `Found` when the lookup
    /// yields a file, `Missing` otherwise.
    pub fn new(name: &'o PathBuf) -> Target<'o> {
        // Collapse the Option straight into the two variants; the
        // `Missing` default is cheap, so eager `map_or` is fine.
        File::find(name).map_or(Target::Missing(name), Target::Found)
    }
}
| 18.15 | 53 | 0.504132 |
f7695c288378f2fb176669dd434db9b94c8906ea | 1,422 | use std::sync::Arc;
use crate::datasource::{HBeeTableDesc, HCombTableDesc, S3ParquetTable};
use crate::error::Result;
use crate::internal_err;
use crate::models::SizedFile;
use crate::protobuf;
use arrow::ipc::convert;
/// Rebuilds an hbee table description from its protobuf representation,
/// returning it together with the SQL statement and source name carried
/// by the message.
///
/// # Errors
/// Fails if the embedded Arrow schema cannot be decoded or if the
/// `scan` field is missing from the message.
pub fn deserialize_hbee(
    message: protobuf::HBeeScanNode,
) -> Result<(HBeeTableDesc, String, String)> {
    let schema = convert::schema_from_bytes(&message.schema)?;
    // `ok_or_else` defers constructing the error to the missing-field
    // path instead of building it eagerly on every call.
    let scan = message
        .scan
        .ok_or_else(|| internal_err!("Scan field cannot be empty"))?;
    let provider = match scan {
        protobuf::h_bee_scan_node::Scan::S3Parquet(scan_node) => S3ParquetTable::new(
            scan_node.region.to_owned(),
            scan_node.bucket.to_owned(),
            // Convert each protobuf file entry into the domain type.
            scan_node
                .files
                .iter()
                .map(|sized_file| SizedFile {
                    key: sized_file.key.to_owned(),
                    length: sized_file.length,
                })
                .collect(),
            Arc::new(schema),
        ),
    };
    Ok((provider, message.sql, message.source))
}
/// Rebuilds an hcomb table description from its protobuf representation,
/// returning it together with the SQL statement and source name.
pub fn deserialize_hcomb(
    message: protobuf::HCombScanNode,
) -> Result<(HCombTableDesc, String, String)> {
    // Decode the Arrow schema first; everything else is a plain copy of
    // message fields into the descriptor constructor.
    let schema = Arc::new(convert::schema_from_bytes(&message.schema)?);
    let query_id = message.query_id.to_owned();
    let nb_hbee = message.nb_hbee as usize;
    let provider = HCombTableDesc::new(query_id, nb_hbee, schema);
    Ok((provider, message.sql, message.source))
}
| 30.255319 | 85 | 0.605485 |
d7604c868152be753ea85dfe3d7404a1f347170a | 1,689 | use crate::timer::{Entry, HandlePriv};
use crate::Error;
use std::sync::Arc;
use std::task::{self, Poll};
use std::time::{Duration, Instant};
/// Registration with a timer.
///
/// The association between a `Delay` instance and a timer is done lazily in
/// `poll`
#[derive(Debug)]
pub(crate) struct Registration {
    // Shared handle to the timer entry backing this registration.
    entry: Arc<Entry>,
}
impl Registration {
    /// Creates a new registration for the given deadline and duration;
    /// it is not associated with a timer until registered (see the
    /// type-level docs).
    pub fn new(deadline: Instant, duration: Duration) -> Registration {
        // Compile-time assertion that `Registration` is `Send + Sync`.
        fn is_send<T: Send + Sync>() {}
        is_send::<Registration>();
        Registration {
            entry: Arc::new(Entry::new(deadline, duration)),
        }
    }
    /// Returns the instant at which this registration elapses.
    pub fn deadline(&self) -> Instant {
        self.entry.time_ref().deadline
    }
    /// Registers the entry with a timer, at most once.
    pub fn register(&mut self) {
        if !self.entry.is_registered() {
            Entry::register(&mut self.entry)
        }
    }
    /// Registers the entry with the given timer handle.
    pub fn register_with(&mut self, handle: HandlePriv) {
        Entry::register_with(&mut self.entry, handle)
    }
    /// Moves the deadline and resets the entry.
    pub fn reset(&mut self, deadline: Instant) {
        self.entry.time_mut().deadline = deadline;
        Entry::reset(&mut self.entry);
    }
    // Used by `Timeout<Stream>`
    #[cfg(feature = "async-traits")]
    pub fn reset_timeout(&mut self) {
        // Push the deadline `duration` past "now", then reset the entry.
        let deadline = crate::clock::now() + self.entry.time_ref().duration;
        self.entry.time_mut().deadline = deadline;
        Entry::reset(&mut self.entry);
    }
    /// Whether the entry has elapsed (delegates to `Entry`).
    pub fn is_elapsed(&self) -> bool {
        self.entry.is_elapsed()
    }
    /// Polls for the deadline to be reached (delegates to `Entry`).
    pub fn poll_elapsed(&self, cx: &mut task::Context<'_>) -> Poll<Result<(), Error>> {
        self.entry.poll_elapsed(cx)
    }
}
impl Drop for Registration {
    fn drop(&mut self) {
        // Cancel the entry when the registration is dropped.
        Entry::cancel(&self.entry);
    }
}
| 25.208955 | 87 | 0.596211 |
0ad1387c4952172d891a37e68fdf9583caed99a9 | 1,157 | use crate::cmd::library::generate::config::Config;
use crate::cmd::library::generate::task::Task;
use crate::cmd::library::generate::tasks::package::package_bootstrap::PackageBootstrapTask;
use crate::cmd::library::generate::tasks::package::package_documentation::PackageDocumentationTask;
use crate::cmd::library::generate::tasks::package::package_example::PackageExampleTask;
use crate::manifest::library::Library;
use crate::manifest::package::Package;
use crate::result::Result;
mod package_bootstrap;
mod package_documentation;
mod package_example;
/// Builds the list of generation tasks for a single package: one example
/// task per declared example, then the package bootstrap task and the
/// package documentation task.
///
/// # Errors
/// Propagates any error raised while creating one of the tasks.
pub fn parse_package(
    // Parameters were previously prefixed with `_`, which conventionally
    // marks *unused* bindings — all three are used below.
    config: &Config,
    library: &Library,
    package: &Package,
) -> Result<Vec<Box<dyn Task>>> {
    log::debug!("parse package {}", &package.urn);
    let mut tasks: Vec<Box<dyn Task>> = vec![];
    for example in package.examples.iter() {
        tasks.push(Box::from(PackageExampleTask::create(
            config, library, package, example,
        )?));
    }
    tasks.push(Box::from(PackageBootstrapTask::create(config, package)?));
    tasks.push(Box::from(PackageDocumentationTask::create(
        config, library, package,
    )?));
    Ok(tasks)
}
| 33.057143 | 99 | 0.705272 |
5b9caac2df28b2999b01a7fd55c4df4e6ea42f05 | 6,835 | use crate::{Button, Divider, GlobalClose, Icon, Position};
use std::rc::Rc;
use yew::{
html::ChildrenRenderer,
prelude::*,
virtual_dom::{VChild, VComp},
};
/// Properties of the `AppLauncher` component.
#[derive(Clone, PartialEq, Properties)]
pub struct Props {
    /// Optional custom toggle content; when unset, the `Th` grid icon is
    /// rendered (see `render_trigger`).
    #[prop_or_default]
    pub toggle: Option<Html>,
    /// Menu children: launcher items and dividers.
    #[prop_or_default]
    pub children: ChildrenRenderer<AppLauncherChildVariant>,
    /// Disables the toggle button when set.
    #[prop_or_default]
    pub disabled: bool,
    /// Placement of the menu relative to the toggle.
    #[prop_or_default]
    pub position: Position,
}
/// Internal messages of the `AppLauncher` component.
pub enum Msg {
    /// Flip the expanded state of the menu.
    Toggle,
    /// Collapse the menu.
    Close,
}
/// Application-launcher dropdown component (renders the
/// `pf-c-app-launcher` CSS structure).
pub struct AppLauncher {
    // Whether the menu is currently expanded.
    expanded: bool,
    // Fires `Msg::Close` when a click lands outside the component.
    global_close: GlobalClose,
}
impl Component for AppLauncher {
    type Message = Msg;
    type Properties = Props;
    fn create(ctx: &Context<Self>) -> Self {
        // Collapse the menu whenever a click happens outside the nav.
        let global_close =
            GlobalClose::new(NodeRef::default(), ctx.link().callback(|_| Msg::Close));
        Self {
            expanded: false,
            global_close,
        }
    }
    fn update(&mut self, _ctx: &Context<Self>, msg: Self::Message) -> bool {
        match msg {
            Msg::Toggle => {
                self.expanded = !self.expanded;
            }
            Msg::Close => self.expanded = false,
        }
        // Both messages may change `expanded`, so always re-render.
        true
    }
    fn view(&self, ctx: &Context<Self>) -> Html {
        let mut classes = Classes::from("pf-c-app-launcher");
        let mut menu_classes = Classes::from("pf-c-app-launcher__menu");
        // Map the requested menu position onto modifier classes.
        match ctx.props().position {
            Position::Left => {}
            Position::Right => menu_classes.push("pf-m-align-right"),
            Position::Top => classes.push("pf-m-top"),
        }
        if self.expanded {
            classes.push("pf-m-expanded");
        }
        let onclick = ctx.link().callback(|_| Msg::Toggle);
        return html! {
            <nav
                class={classes}
                ref={self.global_close.clone()}
            >
                <Button
                    class="pf-c-app-launcher__toggle"
                    r#type="button"
                    disabled={ctx.props().disabled}
                    onclick={onclick}
                >
                    { self.render_trigger(&ctx.props()) }
                </Button>
                <ul
                    class={menu_classes}
                    hidden={!self.expanded}
                >
                    { for ctx.props().children.iter().map(|mut c|{
                        // request a close callback from the item
                        c.set_need_close(ctx.link().callback(|_|Msg::Close));
                        c
                    }) }
                </ul>
            </nav>
        };
    }
}
impl AppLauncher {
    /// Renders the toggle content: the user-supplied `toggle` property
    /// when present, otherwise the default `Th` grid icon.
    fn render_trigger(&self, props: &Props) -> Html {
        // `unwrap_or_else` replaces the if-let/else; the fallback icon
        // is only built when no custom toggle was provided.
        props.toggle.clone().unwrap_or_else(|| Icon::Th.into())
    }
}
/// The kinds of children the launcher accepts: menu items and dividers.
///
/// Properties are stored behind `Rc` so variants are cheap to clone and
/// can be mutated copy-on-write via `Rc::make_mut` (see `set_need_close`).
#[derive(Clone, PartialEq)]
pub enum AppLauncherChild {
    Item(Rc<<AppLauncherItem as Component>::Properties>),
    Divider(Rc<<Divider as Component>::Properties>),
}
impl From<AppLauncherItemProps> for AppLauncherChild {
    fn from(props: AppLauncherItemProps) -> Self {
        AppLauncherChild::Item(Rc::new(props))
    }
}
// `Divider`'s properties type is `()`, so a unit value maps to a divider.
impl From<()> for AppLauncherChild {
    fn from(_: ()) -> Self {
        AppLauncherChild::Divider(Rc::new(()))
    }
}
/// Wrapper allowing the heterogeneous children to live in a single
/// `ChildrenRenderer` list.
#[derive(PartialEq, Clone)]
pub struct AppLauncherChildVariant {
    props: AppLauncherChild,
}
impl AppLauncherChildVariant {
    /// Forward the need to get a close callback to the actual item.
    ///
    /// Only `Item` children carry a close callback; dividers ignore it.
    fn set_need_close(&mut self, callback: Callback<()>) {
        // `if let` replaces the previous single-arm `match` with a
        // catch-all `_ => {}` (clippy::single_match).
        if let AppLauncherChild::Item(ref mut props) = self.props {
            // Copy-on-write: clone the props only if they are shared.
            let props = Rc::make_mut(props);
            props.want_close = callback;
        }
    }
}
/// Accepts any `VChild<C>` whose properties convert into
/// [`AppLauncherChild`], so both items and dividers can be written as
/// children of the launcher in `html!`.
impl<CHILD> From<VChild<CHILD>> for AppLauncherChildVariant
where
    CHILD: Component,
    CHILD::Properties: Into<AppLauncherChild> + Clone,
{
    fn from(vchild: VChild<CHILD>) -> Self {
        Self {
            props: (*vchild.props).clone().into(),
        }
    }
}
// Implement `From` rather than `Into` (clippy::from_over_into); the
// blanket `impl<T, U: From<T>> Into<U> for T` keeps every existing
// `.into()` call site working unchanged.
impl From<AppLauncherChildVariant> for Html {
    fn from(variant: AppLauncherChildVariant) -> Self {
        // Mount the matching component with the stored properties.
        match variant.props {
            AppLauncherChild::Item(props) => {
                VComp::new::<AppLauncherItem>(props, NodeRef::default(), None).into()
            }
            AppLauncherChild::Divider(props) => {
                VComp::new::<Divider>(props, NodeRef::default(), None).into()
            }
        }
    }
}
// Item
/// Properties of a single launcher menu item.
#[derive(Clone, PartialEq, Properties)]
pub struct AppLauncherItemProps {
    /// Item content.
    #[prop_or_default]
    pub children: Children,
    /// Link target; used when no `onclick` handler is set.
    #[prop_or_default]
    pub href: String,
    /// Click handler; when set, the item renders as a button instead of
    /// a link.
    #[prop_or_default]
    pub onclick: Option<Callback<()>>,
    /// Injected by the parent launcher so the item can request closing
    /// the menu (see `AppLauncherChildVariant::set_need_close`).
    #[prop_or_default]
    pub(crate) want_close: Callback<()>,
    /// Marks an external link: opens in a new tab and shows a marker
    /// icon plus a screen-reader hint.
    #[prop_or_default]
    pub external: bool,
}
/// Messages handled by [`AppLauncherItem`].
#[derive(Copy, Clone)]
pub enum AppLauncherItemMsg {
    /// The item was clicked.
    Clicked,
}
/// A single entry inside the launcher menu (holds no state of its own).
#[derive(Clone)]
pub struct AppLauncherItem {}
impl Component for AppLauncherItem {
    type Message = AppLauncherItemMsg;
    type Properties = AppLauncherItemProps;
    fn create(_: &Context<Self>) -> Self {
        Self {}
    }
    fn update(&mut self, ctx: &Context<Self>, msg: Self::Message) -> bool {
        match msg {
            AppLauncherItemMsg::Clicked => {
                // Notify the user-provided handler first (if any)...
                if let Some(onclick) = &ctx.props().onclick {
                    onclick.emit(());
                }
                // request close from our parent
                ctx.props().want_close.emit(());
            }
        }
        // No local state changed, so no re-render is needed.
        false
    }
    fn view(&self, ctx: &Context<Self>) -> Html {
        // With an `onclick` handler the item is a button; otherwise it
        // renders as a plain (possibly external) link.
        let action = if ctx.props().onclick.is_some() {
            html! {
                <Button
                    class="pf-c-app-launcher__menu-item"
                    onclick={ctx.link().callback(|_|Self::Message::Clicked)}
                >
                    { for ctx.props().children.iter() }
                </Button>
            }
        } else {
            let mut classes = Classes::from("pf-c-app-launcher__menu-item");
            // External links open in a new tab and get a modifier class.
            let target = if ctx.props().external {
                classes.push("pf-m-external");
                "_blank"
            } else {
                ""
            };
            html! {
                <a
                    class={classes}
                    target={target}
                    href={ctx.props().href.clone()}
                >
                    { for ctx.props().children.iter() }
                    if ctx.props().external {
                        <span class="pf-c-app-launcher__menu-item-external-icon">
                            { Icon::ExternalLinkAlt }
                        </span>
                        <span class="pf-screen-reader">{"(opens new window)"}</span>
                    }
                </a>
            }
        };
        return html! {
            <li>{action}</li>
        };
    }
}
| 26.389961 | 86 | 0.505925 |
ab024aab4119a42c0216516340f5fbb4dfb30862 | 23,322 | use std::convert::TryInto;
use std::fmt::{self, Debug};
use async_trait::async_trait;
use bytes::Bytes;
use http::Response as HttpResponse;
use log::{debug, error};
use thiserror::Error;
use url::Url;
use crate::api;
use crate::auth::{AuthError, Authenticated, Scope, Unauthenticated};
use crate::{ApiError, AsyncClient, AsyncQuery, Client, Login, Query, RestClient};
/// The error type which is returned by constructor for a Traduora client.
#[derive(Debug, Error)]
#[non_exhaustive]
pub enum TraduoraError {
    /// URL for the Traduora API failed to parse.
    #[error("failed to parse url: {}", source)]
    UrlParse {
        /// Inner error.
        #[from]
        source: url::ParseError,
    },
    /// Authorization header could not be set.
    #[error("error setting auth header: {}", source)]
    AuthError {
        /// Inner error.
        #[from]
        source: AuthError,
    },
    /// Reqwest failed to process the request.
    #[error("communication with traduora: {}", source)]
    Communication {
        /// Inner error.
        #[from]
        source: reqwest::Error,
    },
    /// HTTP error.
    #[error("traduora HTTP error: {}", status)]
    Http {
        /// Status code returned from server
        status: reqwest::StatusCode,
    },
    /// No response from Traduora.
    #[error("no response from traduora")]
    NoResponse {},
    /// Serde failed to deserialize the JSON to the given type.
    #[error("could not parse {} data from JSON: {}", typename, source)]
    DataType {
        /// Inner error.
        #[source]
        source: serde_json::Error,
        /// The type that failed to deserialize.
        typename: &'static str,
    },
    /// Error accessing the API.
    #[error("api error: {}", source)]
    Api {
        /// Inner error.
        #[from]
        source: ApiError<RestError>,
    },
}
// Convenience alias used by the constructors and builders below.
type TraduoraResult<T> = Result<T, TraduoraError>;
/// A representation of the Traduora API for a single user.
///
/// Separate users should use separate instances of this.
#[derive(Clone)]
pub struct Traduora<A: Scope> {
    /// The client to use for API calls.
    client: reqwest::blocking::Client,
    /// The base URL to use for API calls.
    rest_url: Url,
    /// The authentication information to use when communicating with Traduora.
    token: A,
}
impl<A: Scope + Debug> Debug for Traduora<A> {
    /// Formats the client for diagnostics, listing the base URL and the
    /// token (the HTTP client itself is omitted).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Pass the token directly instead of pre-rendering it with
        // `format!`, which allocated a String and wrapped the token's
        // Debug output in escaped quotes.
        f.debug_struct("Traduora")
            .field("rest_url", &self.rest_url)
            .field("token", &self.token)
            .finish()
    }
}
impl Traduora<Unauthenticated> {
    /// Create a new Traduora API representation.
    ///
    /// Calling this method does not query the API.
    ///
    /// # Errors
    /// This method returns an error if
    /// - the host url fails to parse.
    /// - the underlying [`reqwest::blocking::Client`] cannot be initialized.
    ///
    /// # Examples
    /// ```
    /// # use traduora::TraduoraError;
    /// use traduora::Traduora;
    /// # fn main() -> Result<(), TraduoraError> {
    /// let client = Traduora::new("localhost:8080")?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn new<T>(host: T) -> TraduoraResult<Self>
    where
        T: AsRef<str>,
    {
        Builder::new(host.as_ref()).build()
    }
    /// Create a new non-SSL Traduora API representation.
    ///
    /// Calling this method does not query the API.
    ///
    /// # Warning
    /// It is **strongly** recommended to use [`Traduora::new`] instead to force encryption.
    ///
    /// # Errors
    /// This method returns an error if
    /// - the host url fails to parse.
    /// - the underlying [`reqwest::blocking::Client`] cannot be initialized.
    ///
    /// # Examples
    /// ```
    /// # use traduora::TraduoraError;
    /// use traduora::Traduora;
    /// # fn main() -> Result<(), TraduoraError> {
    /// let client = Traduora::new_insecure("localhost:8080")?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn new_insecure<T>(host: T) -> TraduoraResult<Self>
    where
        T: AsRef<str>,
    {
        Builder::new(host.as_ref())
            .use_http(true)
            .validate_certs(false)
            .build()
    }
    /// Tries to authenticate the Traduora client.
    ///
    /// Calling this method queries the Traduora API.
    ///
    /// # Errors
    /// This method returns an error if the provided credentials are invalid.
    ///
    /// # Examples
    /// ```no_run
    /// # use traduora::TraduoraError;
    /// use traduora::{Login, Traduora};
    /// # fn main() -> Result<(), TraduoraError> {
    /// let client = Traduora::new_insecure("localhost:8080")?;
    /// let login = Login::password("admin@example.com", "password");
    /// let authenticated_client = client.authenticate(&login)?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn authenticate(self, login: &Login) -> TraduoraResult<Traduora<Authenticated>> {
        // Exchange the credentials for an access token, then rebuild the
        // client (same HTTP client and base URL) with the new scope.
        let token = login.query(&self)?;
        Ok(Traduora {
            client: self.client,
            rest_url: self.rest_url,
            token: token.into(),
        })
    }
}
impl Traduora<Authenticated> {
    /// Create a new Traduora API representation and authenticate
    /// the user.
    ///
    /// Calling this method queries the Traduora API.
    ///
    /// # Errors
    /// This method returns an error if
    /// - the provided credentials are invalid.
    /// - the host url fails to parse.
    /// - the underlying [`reqwest::blocking::Client`] cannot be initialized.
    ///
    /// # Examples
    /// ```no_run
    /// # use traduora::TraduoraError;
    /// use traduora::{Login, Traduora};
    /// # fn main() -> Result<(), TraduoraError> {
    /// let login = Login::password("admin@example.com", "password");
    /// let client = Traduora::with_auth("localhost:8080", login)?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn with_auth<T>(host: T, login: Login) -> TraduoraResult<Self>
    where
        T: AsRef<str>,
    {
        Builder::new(host.as_ref()).authenticate(login).build()
    }
    /// Create a new non-SSL Traduora API representation
    /// and authenticate the user.
    ///
    /// Calling this method queries the Traduora API.
    ///
    /// # Warning
    /// It is **strongly** recommended to use [`Traduora::new`] instead to force encryption.
    ///
    /// # Errors
    /// This method returns an error if
    /// - the provided credentials are invalid.
    /// - the host url fails to parse.
    /// - the underlying [`reqwest::blocking::Client`] cannot be initialized.
    ///
    /// # Examples
    /// ```no_run
    /// # use traduora::TraduoraError;
    /// use traduora::{Login, Traduora};
    /// # fn main() -> Result<(), TraduoraError> {
    /// let login = Login::password("admin@example.com", "password");
    /// let client = Traduora::with_auth_insecure("localhost:8080", login)?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn with_auth_insecure<T>(host: T, login: Login) -> TraduoraResult<Self>
    where
        T: AsRef<str>,
    {
        // Plain HTTP without certificate validation: credentials travel
        // unencrypted — intended for local/test instances only.
        Builder::new(host.as_ref())
            .use_http(true)
            .validate_certs(false)
            .authenticate(login)
            .build()
    }
}
/// The error type for individual REST calls made by the clients.
#[derive(Debug, Error)]
#[non_exhaustive]
pub enum RestError {
    /// Authorization header could not be set.
    #[error("error setting auth header: {}", source)]
    AuthError {
        /// Inner error.
        #[from]
        source: AuthError,
    },
    /// Reqwest failed to process the request.
    #[error("communication with traduora: {}", source)]
    Communication {
        /// Inner error.
        #[from]
        source: reqwest::Error,
    },
    /// The `http` crate rejected the request or response.
    #[error("`http` error: {}", source)]
    Http {
        /// Inner error.
        #[from]
        source: http::Error,
    },
}
impl<A: Scope> RestClient for Traduora<A> {
    type Error = RestError;
    // The access level mirrors the client's own scope parameter.
    type AccessLevel = A;
    /// Resolves `endpoint` against the client's base URL.
    fn rest_endpoint(&self, endpoint: &str) -> Result<Url, ApiError<Self::Error>> {
        debug!(target: "traduora", "REST api call {}", endpoint);
        Ok(self.rest_url.join(endpoint)?)
    }
}
impl<A: Scope> Client for Traduora<A> {
    /// Executes a prepared HTTP request synchronously and converts the
    /// `reqwest` response into the generic `http::Response<Bytes>`.
    fn rest(
        &self,
        mut request: http::request::Builder,
        body: Vec<u8>,
    ) -> Result<HttpResponse<Bytes>, ApiError<Self::Error>> {
        let call = || -> Result<_, RestError> {
            // Let the scope install its auth header on the outgoing
            // request (behaviour depends on the `Scope` impl).
            self.token.set_header(request.headers_mut().unwrap())?;
            let http_request = request.body(body)?;
            let request = http_request.try_into()?;
            let rsp = self.client.execute(request)?;
            // Mirror status, version, headers and body onto the generic
            // response type expected by the API layer.
            let mut http_rsp = HttpResponse::builder()
                .status(rsp.status())
                .version(rsp.version());
            let headers = http_rsp.headers_mut().unwrap();
            for (key, value) in rsp.headers() {
                headers.insert(key, value.clone());
            }
            Ok(http_rsp.body(rsp.bytes()?)?)
        };
        call().map_err(ApiError::client)
    }
}
/// A representation of the asynchronous Traduora API for a single user.
///
/// Separate users should use separate instances of this.
#[derive(Clone)]
pub struct AsyncTraduora<A: Scope> {
    /// The client to use for API calls.
    client: reqwest::Client,
    /// The base URL to use for API calls.
    rest_url: Url,
    /// The authentication information to use when communicating with Traduora.
    token: A,
}
impl<A: Scope + Debug> Debug for AsyncTraduora<A> {
    /// Formats the client for diagnostics, listing the base URL and the
    /// token (the HTTP client itself is omitted).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Pass the token directly instead of pre-rendering it with
        // `format!` (needless allocation plus quoted/escaped output);
        // keeps the async impl consistent with the blocking one.
        f.debug_struct("AsyncTraduora")
            .field("rest_url", &self.rest_url)
            .field("token", &self.token)
            .finish()
    }
}
#[async_trait]
impl<A: Scope> RestClient for AsyncTraduora<A> {
    type Error = RestError;
    // Mirror the client's scope parameter, exactly like the blocking
    // `Traduora<A>` impl. The previous hard-coded `Authenticated` let an
    // `AsyncTraduora<Unauthenticated>` satisfy access-level bounds meant
    // for authenticated clients.
    type AccessLevel = A;
    /// Resolves `endpoint` against the client's base URL.
    fn rest_endpoint(&self, endpoint: &str) -> Result<Url, ApiError<Self::Error>> {
        debug!(target: "traduora", "REST api call {}", endpoint);
        Ok(self.rest_url.join(endpoint)?)
    }
}
#[async_trait]
impl<A: Scope + Send + Sync> AsyncClient for AsyncTraduora<A> {
    /// Executes a prepared HTTP request asynchronously and converts the
    /// `reqwest` response into the generic `http::Response<Bytes>`.
    async fn rest_async(
        &self,
        mut request: http::request::Builder,
        body: Vec<u8>,
    ) -> Result<HttpResponse<Bytes>, ApiError<Self::Error>> {
        let call = || async {
            // Let the scope install its auth header on the outgoing
            // request (behaviour depends on the `Scope` impl).
            self.token.set_header(request.headers_mut().unwrap())?;
            let http_request = request.body(body)?;
            let request = http_request.try_into()?;
            let rsp = self.client.execute(request).await?;
            // Mirror status, version, headers and body onto the generic
            // response type expected by the API layer.
            let mut http_rsp = HttpResponse::builder()
                .status(rsp.status())
                .version(rsp.version());
            let headers = http_rsp.headers_mut().unwrap();
            for (key, value) in rsp.headers() {
                headers.insert(key, value.clone());
            }
            Ok(http_rsp.body(rsp.bytes().await?)?)
        };
        call().await.map_err(ApiError::client)
    }
}
impl AsyncTraduora<Unauthenticated> {
    /// Create a new Traduora API representation.
    ///
    /// Calling this method does not query the API.
    ///
    /// # Errors
    /// This method returns an error if
    /// - the host url fails to parse.
    /// - the underlying [`reqwest::Client`] cannot be initialized.
    ///
    /// # Examples
    /// ```
    /// # use traduora::TraduoraError;
    /// use traduora::AsyncTraduora;
    /// # fn main() -> Result<(), TraduoraError> {
    /// let client = AsyncTraduora::new("localhost:8080")?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn new<T>(host: T) -> TraduoraResult<Self>
    where
        T: AsRef<str>,
    {
        Builder::new(host.as_ref()).build_async()
    }
    /// Create a new non-SSL Traduora API representation.
    ///
    /// Calling this method does not query the API.
    ///
    /// # Warning
    /// It is **strongly** recommended to use [`AsyncTraduora::new`] instead to force encryption.
    ///
    /// # Errors
    /// This method returns an error if
    /// - the host url fails to parse.
    /// - the underlying [`reqwest::Client`] cannot be initialized.
    ///
    /// # Examples
    /// ```
    /// # use traduora::TraduoraError;
    /// use traduora::AsyncTraduora;
    /// # fn main() -> Result<(), TraduoraError> {
    /// let client = AsyncTraduora::new_insecure("localhost:8080")?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn new_insecure<T>(host: T) -> TraduoraResult<Self>
    where
        T: AsRef<str>,
    {
        Builder::new(host.as_ref())
            .use_http(true)
            .validate_certs(false)
            .build_async()
    }
    /// Tries to authenticate the Traduora client.
    ///
    /// Calling this method queries the Traduora API.
    ///
    /// # Errors
    /// This method returns an error if the provided credentials are invalid.
    ///
    /// # Examples
    /// ```no_run
    /// # use traduora::TraduoraError;
    /// use traduora::{Login, AsyncTraduora};
    /// # async fn main_async() -> Result<(), TraduoraError> {
    /// let client = AsyncTraduora::new_insecure("localhost:8080")?;
    /// let login = Login::password("admin@example.com", "password");
    /// let authenticated_client = client.authenticate(&login).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub async fn authenticate(self, login: &Login) -> TraduoraResult<AsyncTraduora<Authenticated>> {
        // Exchange the credentials for an access token, then rebuild the
        // client (same HTTP client and base URL) with the new scope.
        let token = login.query_async(&self).await?;
        Ok(AsyncTraduora {
            client: self.client,
            rest_url: self.rest_url,
            token: token.into(),
        })
    }
}
impl AsyncTraduora<Authenticated> {
    /// Create a new Traduora API representation
    /// and authenticate the user.
    ///
    /// Calling this method queries the Traduora API.
    ///
    /// # Errors
    /// This method returns an error if
    /// - the provided credentials are invalid.
    /// - the host url fails to parse.
    /// - the underlying [`reqwest::Client`] cannot be initialized.
    ///
    /// # Examples
    /// ```no_run
    /// # use traduora::TraduoraError;
    /// use traduora::{Login, AsyncTraduora};
    /// # async fn main_async() -> Result<(), TraduoraError> {
    /// let login = Login::password("admin@example.com", "password");
    /// let client = AsyncTraduora::with_auth("localhost:8080", login).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub async fn with_auth<T>(host: T, login: Login) -> TraduoraResult<Self>
    where
        T: AsRef<str> + Sync + Send + 'static,
    {
        Builder::new(host.as_ref())
            .authenticate(login)
            .build_async()
            .await
    }
    /// Create a new non-SSL Traduora API representation
    /// and authenticate the user.
    ///
    /// Calling this method queries the Traduora API.
    ///
    /// # Warning
    /// It is **strongly** recommended to use [`AsyncTraduora::new`] instead to force encryption.
    ///
    /// # Errors
    /// This method returns an error if
    /// - the provided credentials are invalid.
    /// - the host url fails to parse.
    /// - the underlying [`reqwest::Client`] cannot be initialized.
    ///
    /// # Examples
    /// ```no_run
    /// # use traduora::TraduoraError;
    /// use traduora::{Login, AsyncTraduora};
    /// # async fn main_async() -> Result<(), TraduoraError> {
    /// let login = Login::password("admin@example.com", "password");
    /// let client = AsyncTraduora::with_auth_insecure("localhost:8080", login).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub async fn with_auth_insecure<T>(host: T, login: Login) -> TraduoraResult<Self>
    where
        T: AsRef<str> + Sync + Send + 'static,
    {
        // Plain HTTP without certificate validation: credentials travel
        // unencrypted — intended for local/test instances only.
        Builder::new(host.as_ref())
            .use_http(true)
            .validate_certs(false)
            .authenticate(login)
            .build_async()
            .await
    }
}
/// Creates a new instance of [`Traduora`] or [`AsyncTraduora`] with custom parameters.
///
/// The builder is what the constructors on these types call under the hood.
///
/// # Examples
/// Assume you want to connect to a Traduora instance with encryption
/// that only has a self-signed certificate and you already stored an
/// access token somewhere.
/// ```
/// use traduora::{api::AccessToken, TraduoraBuilder};
///
/// # fn main() -> Result<(), traduora::TraduoraError> {
/// let token = AccessToken::new("eyJhbGc...................XMywm-zM");
/// let client = TraduoraBuilder::new("localhost:8080")
///     .use_http(true)
///     .validate_certs(false)
///     .with_access_token(token)
///     .build()?;
/// # Ok(())
/// # }
/// ```
///
#[derive(Clone, Debug)]
#[must_use]
pub struct Builder<'h, L> {
    /// Host name (and optional port) of the Traduora instance.
    host: &'h str,
    /// Either `"http"` or `"https"` (see `use_http`).
    protocol: &'static str,
    /// Whether TLS certificates are validated.
    validate_certs: bool,
    /// Login state: `()` (none), `Login`, or `api::AccessToken`.
    login: L,
}
impl<'h> Builder<'h, ()> {
    /// Construct a new builder instance.
    ///
    /// The builder is initialized with the following defaults:
    /// - uses HTTPS
    /// - validates certificates
    /// - unauthenticated access
    pub const fn new(host: &'h str) -> Self {
        Self {
            host,
            protocol: "https",
            validate_certs: true,
            login: (),
        }
    }
    /// Adds login information to the builder.
    ///
    /// Note that the Traduora API is not queried when calling this
    /// function. It is queried only when calling [`Builder::build`]
    /// or [`Builder::build_async`].
    pub const fn authenticate(self, login: Login) -> Builder<'h, Login> {
        Builder {
            host: self.host,
            protocol: self.protocol,
            validate_certs: self.validate_certs,
            login,
        }
    }
    /// Adds an access token string to the builder.
    ///
    /// Note that the Traduora API won't be queried at all when the
    /// client is built with this method. The token is assumed to be valid
    /// and passed to the client without any modifications.
    pub const fn with_access_token(self, login: api::AccessToken) -> Builder<'h, api::AccessToken> {
        Builder {
            host: self.host,
            protocol: self.protocol,
            validate_certs: self.validate_certs,
            login,
        }
    }
    /// Builds a synchronous client without authentication information.
    ///
    /// # Errors
    /// This method returns an error if
    /// - the host url fails to parse.
    /// - the underlying [`reqwest::blocking::Client`] cannot be initialized.
    pub fn build(&self) -> TraduoraResult<Traduora<Unauthenticated>> {
        self.build_unauthenticated()
    }
    /// Builds an asynchronous client without authentication information.
    ///
    /// # Errors
    /// This method returns an error if
    /// - the host url fails to parse.
    /// - the underlying [`reqwest::Client`] cannot be initialized.
    pub fn build_async(&self) -> TraduoraResult<AsyncTraduora<Unauthenticated>> {
        self.build_unauthenticated_async()
    }
}
impl<'h> Builder<'h, Login> {
    /// Builds a synchronous client with authentication information.
    ///
    /// Calling this method queries the Traduora API for an access token.
    ///
    /// # Errors
    /// This method returns an error if
    /// - the provided credentials are invalid.
    /// - the host url fails to parse.
    /// - the underlying [`reqwest::blocking::Client`] cannot be initialized.
    pub fn build(self) -> TraduoraResult<Traduora<Authenticated>> {
        let api = self.build_unauthenticated()?;
        api.authenticate(&self.login)
    }
    /// Builds an asynchronous client with authentication information.
    ///
    /// Calling this method queries the Traduora API for an access token.
    ///
    /// # Errors
    /// This method returns an error if
    /// - the provided credentials are invalid.
    /// - the host url fails to parse.
    /// - the underlying [`reqwest::Client`] cannot be initialized.
    pub async fn build_async(self) -> TraduoraResult<AsyncTraduora<Authenticated>> {
        let api = self.build_unauthenticated_async()?;
        api.authenticate(&self.login).await
    }
}
impl<'h> Builder<'h, api::AccessToken> {
    /// Builds a synchronous client with authentication information.
    ///
    /// Calling this method does not query the Traduora API. The access
    /// token is assumed to be valid. In case it's not, calls to endpoints
    /// requiring authentication will fail.
    ///
    /// # Errors
    /// This method returns an error if
    /// - the host url fails to parse.
    /// - the underlying [`reqwest::blocking::Client`] cannot be initialized.
    pub fn build(&self) -> TraduoraResult<Traduora<Authenticated>> {
        let api = self.build_unauthenticated()?;
        Ok(Traduora {
            client: api.client,
            rest_url: api.rest_url,
            token: self.login.clone().into(),
        })
    }
    /// Builds an asynchronous client with authentication information.
    ///
    /// Calling this method does not query the Traduora API. The access
    /// token is assumed to be valid. In case it's not, calls to endpoints
    /// requiring authentication will fail.
    ///
    /// # Errors
    /// This method returns an error if
    /// - the host url fails to parse.
    /// - the underlying [`reqwest::Client`] cannot be initialized.
    pub async fn build_async(&self) -> TraduoraResult<AsyncTraduora<Authenticated>> {
        let api = self.build_unauthenticated_async()?;
        Ok(AsyncTraduora {
            client: api.client,
            rest_url: api.rest_url,
            token: self.login.clone().into(),
        })
    }
}
impl<'h, L> Builder<'h, L> {
    /// Decides whether to connect with unencrypted HTTP
    /// or via HTTPS.
    ///
    /// # Warning
    /// It is **strongly** recommended to use encryption. Otherwise,
    /// login data will be sent in plain text.
    /// You can try a self-signed certificate instead, or even better
    /// a fully valid one.
    pub const fn use_http(mut self, use_http: bool) -> Self {
        self.protocol = if use_http { "http" } else { "https" };
        self
    }
    /// Decides whether the SSL certificates will be validated when
    /// opening the connection.
    ///
    /// # Warning
    /// It is recommended to just use valid (non-self-signed) certificates.
    pub const fn validate_certs(mut self, validate: bool) -> Self {
        self.validate_certs = validate;
        self
    }
    /// Assembles the `<protocol>://<host>/api/v1/` base URL.
    fn build_rest_url(&self) -> Result<Url, url::ParseError> {
        format!("{}://{}/api/v1/", self.protocol, self.host).parse()
    }
    /// Shared constructor for the blocking, unauthenticated client.
    fn build_unauthenticated(&self) -> TraduoraResult<Traduora<Unauthenticated>> {
        Ok(Traduora {
            client: reqwest::blocking::Client::builder()
                .danger_accept_invalid_certs(!self.validate_certs)
                .build()?,
            rest_url: self.build_rest_url()?,
            token: Unauthenticated,
        })
    }
    /// Shared constructor for the async, unauthenticated client.
    fn build_unauthenticated_async(&self) -> TraduoraResult<AsyncTraduora<Unauthenticated>> {
        Ok(AsyncTraduora {
            client: reqwest::Client::builder()
                .danger_accept_invalid_certs(!self.validate_certs)
                .build()?,
            rest_url: self.build_rest_url()?,
            token: Unauthenticated,
        })
    }
}
| 31.904241 | 100 | 0.583012 |
d5013ebb204fe387c0b30664be0ad6236d57c157 | 304 | macro_rules! it_must {
($func_name:ident $code:block) => {
#[test]
fn $func_name() {
assert!($code)
}
};
($($fn:ident $code:block)+) => {
$(
it_must! {
$fn $code
}
)+
}
}
pub(crate) use it_must; | 16.888889 | 39 | 0.371711 |
aba23e53282c788e7a4902ca5b3c54fc5a644e87 | 974 | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::util;
use std::num::Zero;
// Fixtures exercising `#[deriving(Zero)]` over every struct shape: unit
// struct, 1- and 2-field tuple structs, 1- and 2-field record structs,
// and a struct mixing many zeroable field types. NOTE: this is pre-1.0
// Rust syntax (`int`, `float`, `~[]`, `@mut`) — kept intentionally.
#[deriving(Zero)]
struct A;
#[deriving(Zero)]
struct B(int);
#[deriving(Zero)]
struct C(int, int);
#[deriving(Zero)]
struct D { a: int }
#[deriving(Zero)]
struct E { a: int, b: int }
#[deriving(Zero)]
struct Lots {
    c: Option<util::NonCopyable>,
    d: u8,
    e: char,
    f: float,
    g: (f32, char),
    h: ~[util::NonCopyable],
    i: @mut (int, int),
    j: bool,
    k: (),
}
fn main() {
    // A value built from `Zero::zero()` must report itself as zero.
    let lots: Lots = Zero::zero();
    assert!(lots.is_zero());
}
| 23.190476 | 69 | 0.647844 |
e94663570a370be9786c02171e1e4444ba35f0e5 | 5,774 | // Generated from definition io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceSubresourceScale
/// CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources.
// NOTE(review): this type is machine-generated (see the header comment
// above); prefer changing the generator over hand-editing this file.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct CustomResourceSubresourceScale {
    /// LabelSelectorPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Selector. Only JSON paths without the array notation are allowed. Must be a JSON Path under .status. Must be set to work with HPA. If there is no value under the given path in the CustomResource, the status label selector value in the /scale subresource will default to the empty string.
    pub label_selector_path: Option<String>,
    /// SpecReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Spec.Replicas. Only JSON paths without the array notation are allowed. Must be a JSON Path under .spec. If there is no value under the given path in the CustomResource, the /scale subresource will return an error on GET.
    pub spec_replicas_path: String,
    /// StatusReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Replicas. Only JSON paths without the array notation are allowed. Must be a JSON Path under .status. If there is no value under the given path in the CustomResource, the status replica value in the /scale subresource will default to 0.
    pub status_replicas_path: String,
}
// Hand-rolled (generated) serde deserializer using the classic
// field-identifier + visitor pattern, equivalent to what
// `#[derive(Deserialize)]` with camelCase renames would produce.
impl<'de> serde::Deserialize<'de> for CustomResourceSubresourceScale {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
        // Internal tag enum for the three known JSON keys; anything else
        // falls into `Other` and is skipped below.
        #[allow(non_camel_case_types)]
        enum Field {
            Key_label_selector_path,
            Key_spec_replicas_path,
            Key_status_replicas_path,
            Other,
        }
        impl<'de> serde::Deserialize<'de> for Field {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
                struct Visitor;
                impl<'de> serde::de::Visitor<'de> for Visitor {
                    type Value = Field;
                    fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                        f.write_str("field identifier")
                    }
                    // Maps the wire (camelCase) key names onto the tag enum.
                    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error {
                        Ok(match v {
                            "labelSelectorPath" => Field::Key_label_selector_path,
                            "specReplicasPath" => Field::Key_spec_replicas_path,
                            "statusReplicasPath" => Field::Key_status_replicas_path,
                            _ => Field::Other,
                        })
                    }
                }
                deserializer.deserialize_identifier(Visitor)
            }
        }
        struct Visitor;
        impl<'de> serde::de::Visitor<'de> for Visitor {
            type Value = CustomResourceSubresourceScale;
            fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                f.write_str("CustomResourceSubresourceScale")
            }
            fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> {
                let mut value_label_selector_path: Option<String> = None;
                let mut value_spec_replicas_path: Option<String> = None;
                let mut value_status_replicas_path: Option<String> = None;
                while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? {
                    match key {
                        // Optional field: `next_value` already yields an
                        // `Option<String>`, so no extra `Some(..)` wrapping.
                        Field::Key_label_selector_path => value_label_selector_path = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_spec_replicas_path => value_spec_replicas_path = Some(serde::de::MapAccess::next_value(&mut map)?),
                        Field::Key_status_replicas_path => value_status_replicas_path = Some(serde::de::MapAccess::next_value(&mut map)?),
                        // Unknown keys are consumed and discarded.
                        Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; },
                    }
                }
                Ok(CustomResourceSubresourceScale {
                    label_selector_path: value_label_selector_path,
                    // The two replica paths are required by the schema.
                    spec_replicas_path: value_spec_replicas_path.ok_or_else(|| serde::de::Error::missing_field("specReplicasPath"))?,
                    status_replicas_path: value_status_replicas_path.ok_or_else(|| serde::de::Error::missing_field("statusReplicasPath"))?,
                })
            }
        }
        deserializer.deserialize_struct(
            "CustomResourceSubresourceScale",
            &[
                "labelSelectorPath",
                "specReplicasPath",
                "statusReplicasPath",
            ],
            Visitor,
        )
    }
}
impl serde::Serialize for CustomResourceSubresourceScale {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
        let mut state = serializer.serialize_struct(
            "CustomResourceSubresourceScale",
            // Field count: 2 required fields, plus 1 if the optional
            // label selector path is present.
            2 +
            self.label_selector_path.as_ref().map_or(0, |_| 1),
        )?;
        // The optional field is omitted entirely when `None`.
        if let Some(value) = &self.label_selector_path {
            serde::ser::SerializeStruct::serialize_field(&mut state, "labelSelectorPath", value)?;
        }
        serde::ser::SerializeStruct::serialize_field(&mut state, "specReplicasPath", &self.spec_replicas_path)?;
        serde::ser::SerializeStruct::serialize_field(&mut state, "statusReplicasPath", &self.status_replicas_path)?;
        serde::ser::SerializeStruct::end(state)
    }
}
| 52.972477 | 395 | 0.621233 |
7637f5a041ff86fbbd24aab79011d34023b563d9 | 1,705 | use super::{Expression, Rule};
use crate::quota::Quota;
use nftnl_sys::{self as sys};
use std::os::raw::c_char;
use std::ffi::CString;
/// A reference to a quota object. Attaching it to a rule makes the named
/// quota count the bytes of every packet that matches the rule.
pub struct QuotaRef {
    // Name of the referenced quota object, NUL-terminated for the C API.
    quota_name: CString,
}
impl QuotaRef {
    /// Creates a reference to an existing [`Quota`] by copying its name.
    pub fn new(quota: &Quota) -> Self {
        QuotaRef {
            quota_name: quota.get_name().to_owned(),
        }
    }
}
// pub fn new<T: AsRef<CStr>>(name: &T) -> Self {
// QuotaRef {
// quota_name: name.as_ref().to_owned(),
// }
// }
// trait IntoQuotaRef {
// fn into(self) -> QuotaRef;
// }
// impl<'a> IntoQuotaRef for &'a Quota {
// fn into(self) -> QuotaRef {
// QuotaRef::new(self.get_name().to_owned())
// }
// }
// impl IntoQuotaRef for CString {
// fn into(self) -> QuotaRef {
// QuotaRef::new(self.to_owned())
// }
// }
impl Expression for QuotaRef {
    /// Builds an nftnl `objref` expression pointing at the named quota.
    /// The rule itself is not needed to construct the expression.
    fn to_expr(&self, _rule: &Rule) -> *mut sys::nftnl_expr {
        unsafe {
            // Allocate the raw expression; `try_alloc!` handles a null return.
            let expr = try_alloc!(sys::nftnl_expr_alloc(b"objref\0" as *const _ as *const c_char));
            // Set the name of the referenced object. nftnl copies the
            // string, so passing a pointer into `self` here is fine.
            sys::nftnl_expr_set_str(
                expr,
                sys::NFTNL_EXPR_OBJREF_IMM_NAME as u16,
                self.quota_name.as_ptr() as *const _ as *const c_char
            );
            // Mark the referenced object as a quota.
            sys::nftnl_expr_set_u32(
                expr,
                sys::NFTNL_EXPR_OBJREF_IMM_TYPE as u16,
                sys::NFT_OBJECT_QUOTA as u32,
            );
            // Ownership of the allocated expression passes to the caller.
            expr
        }
    }
}
/// Convenience macro: `nft_expr_quota!(&quota)` expands to
/// [`QuotaRef::new`] on the given quota reference.
#[macro_export]
macro_rules! nft_expr_quota {
    ($quota:expr) => {
        $crate::expr::QuotaRef::new($quota)
    };
}
| 23.680556 | 109 | 0.558358 |
5dbe7c85a99c47a3691a64619664707a4dcb32cc | 741 | //! USB structures and types
/// CCID USB descriptor.
///
/// `#[repr(C, packed)]` keeps the field layout byte-exact so the struct can
/// be emitted directly as descriptor bytes. Field names keep the USB
/// descriptor Hungarian-style prefixes (`b` = byte, `w` = word,
/// `dw` = double word, `bcd` = binary-coded decimal).
#[allow(non_snake_case)]
#[repr(C, packed)]
pub struct Descriptor {
    pub bLength: u8,
    pub bDescriptorType: u8,
    pub bcdCCID: u16,
    pub bMaxSlotIndex: u8,
    pub bVoltageSupport: u8,
    pub dwProtocols: u32,
    pub dwDefaultClock: u32,
    pub dwMaximumClock: u32,
    pub bNumClockSupported: u8,
    pub dwDataRate: u32,
    pub dwMaxDataRate: u32,
    pub bNumDataRatesSupported: u8,
    pub dwMaxIFSD: u32,
    pub dwSynchProtocols: u32,
    pub dwMechanical: u32,
    pub dwFeatures: u32,
    pub dwMaxCCIDMessageLength: u32,
    pub bClassGetResponse: u8,
    pub bClassEnvelope: u8,
    pub wLcdLayout: u16,
    pub bPINSupport: u8,
    pub bMaxCCIDBusySlots: u8,
}
| 24.7 | 36 | 0.682861 |
23605cd2fd91dfa54acf8dcfd0bbdb30f3c4a60a | 648 | // Confusing diagnostic when using variable as a type:
//
// Previous warnings indicate Foo is not used, when in fact it is
// used improperly as a variable or constant. New warning points
// out user may be trying to use variable as a type. Test demonstrates
// cases for both local variable and const.
fn main() {
    // `Baz` is a local *variable*, not a type, so using it in a path
    // expression below must produce a resolution error.
    let Baz: &str = "";
    println!("{}", Baz::Bar); //~ ERROR: failed to resolve: use of undeclared type `Baz`
}
#[allow(non_upper_case_globals)]
pub const Foo: &str = "";
mod submod {
    use super::Foo;
    // Same confusion, but with an imported *constant* used as a type.
    fn function() {
        println!("{}", Foo::Bar); //~ ERROR: failed to resolve: use of undeclared type `Foo`
    }
}
| 28.173913 | 92 | 0.658951 |
699250d0ac0912b68decf7a44894e1f49c5f7af0 | 1,807 | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/vhdirk/gir-files.git)
// DO NOT EDIT
use crate::Filter;
use glib::object::Cast;
use glib::object::IsA;
use glib::translate::*;
use std::fmt;
// Generated gobject wrapper: `FilterWindows` is a reference-counted handle
// around `GMimeFilterWindows`, subclassing `Filter`.
glib::wrapper! {
    #[doc(alias = "GMimeFilterWindows")]
    pub struct FilterWindows(Object<ffi::GMimeFilterWindows, ffi::GMimeFilterWindowsClass>) @extends Filter;
    match fn {
        type_ => || ffi::g_mime_filter_windows_get_type(),
    }
}
impl FilterWindows {
    /// Creates a filter for the given claimed charset name.
    #[doc(alias = "g_mime_filter_windows_new")]
    pub fn new(claimed_charset: &str) -> FilterWindows {
        assert_initialized_main_thread!();
        unsafe {
            // The C constructor returns a `GMimeFilter*`; downcast to the
            // concrete wrapper type.
            Filter::from_glib_full(ffi::g_mime_filter_windows_new(
                claimed_charset.to_glib_none().0,
            ))
            .unsafe_cast()
        }
    }
}
// Convenience `None` constant for APIs taking `Option<&FilterWindows>`.
pub const NONE_FILTER_WINDOWS: Option<&FilterWindows> = None;
/// Extension trait exposing the `GMimeFilterWindows` methods; blanket
/// implemented below for every type that can act as a `FilterWindows`.
pub trait FilterWindowsExt: 'static {
    #[doc(alias = "g_mime_filter_windows_is_windows_charset")]
    fn is_windows_charset(&self) -> bool;
    #[doc(alias = "g_mime_filter_windows_real_charset")]
    fn real_charset(&self) -> Option<glib::GString>;
}
impl<O: IsA<FilterWindows>> FilterWindowsExt for O {
    fn is_windows_charset(&self) -> bool {
        unsafe {
            from_glib(ffi::g_mime_filter_windows_is_windows_charset(
                self.as_ref().to_glib_none().0,
            ))
        }
    }
    fn real_charset(&self) -> Option<glib::GString> {
        unsafe {
            // `from_glib_none` copies the C string; a NULL return maps to `None`.
            from_glib_none(ffi::g_mime_filter_windows_real_charset(
                self.as_ref().to_glib_none().0,
            ))
        }
    }
}
impl fmt::Display for FilterWindows {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("FilterWindows")
    }
}
| 27.378788 | 108 | 0.629773 |
91009bbe401aaee50046c8127afe1ccb33196eac | 183 | use super::*;
use assert2::assert;
#[test]
fn builds_empty_container_builder() {
    // A freshly constructed builder must start out with no entries.
    let builder = ContainerBuilder::new();
    assert!(builder.is_empty());
}
| 15.25 | 41 | 0.63388 |
e8d6cdb2c9907090a778fd9ead961b8e6389ed3b | 12,147 | // Copyright 2015 Marius Ritter
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// A digraph represented by a dense adjacency matrix of fixed size
use traits::{BasicGraphMethods, GraphProperties, GraphErrors,
GraphAlgorithms, DirectedGraphAlgorithms};
/// A digraph represented by a dense adjacency matrix of fixed size
///
/// The cells of the adjacency matrix are assigned values of type Option<f64>
/// where a None value represents the absence of an edge.
#[derive(PartialEq, Clone, Debug)]
pub struct Digraph {
    /// Number of nodes; the matrix is `size * size` cells, row-major.
    size: usize,
    /// Row-wise adjacency matrix; `weights[size * u + v]` is edge (u, v).
    weights: Vec<Option<f64>>,
}
impl Digraph {
    /// Creates a new digraph object with a fixed size and no edges.
    pub fn new(size: usize) -> Digraph {
        Digraph {
            size,
            weights: vec![None; size * size],
        }
    }
    /// Creates a new digraph object with a fixed size from a vector that is
    /// the row-wise representation of an adjacency matrix.
    ///
    /// Panics if the vector's length is not a perfect square.
    pub fn from_vec(weights: &Vec<Option<f64>>) -> Digraph {
        let len = weights.len();
        // Integer square root: start from the float estimate, then correct
        // with integer arithmetic so large lengths cannot be miscomputed by
        // floating-point rounding (the old `powf(0.5).floor()` could be off
        // by one for very large vectors).
        let mut size = (len as f64).sqrt() as usize;
        while size * size > len {
            size -= 1;
        }
        while size * size < len {
            size += 1;
        }
        assert!(len == size * size,
                "The weights vector is not of square dimension.");
        Digraph {
            size,
            weights: weights.clone(),
        }
    }
    /// Returns the vector that is the row-wise representation of the
    /// adjacency matrix of the digraph.
    pub fn to_vec(&self) -> Vec<Option<f64>> {
        self.weights.clone()
    }
}
impl BasicGraphMethods for Digraph {
    /// Number of nodes in the digraph.
    fn size(&self) -> usize {
        self.size
    }

    /// Returns the weight of edge `(row, col)`, panicking on out-of-range
    /// node indices.
    fn get_edge(&self, edge: (usize, usize)) -> Option<f64> {
        let (row, col) = edge;
        let n = self.size();
        assert!(row < n, "The first node index is out of range.");
        assert!(col < n, "The second node index is out of range.");
        // Row-major indexing into the dense matrix.
        self.weights[n * row + col]
    }

    /// Sets the weight of edge `(row, col)`, panicking on out-of-range
    /// node indices. `None` removes the edge.
    fn set_edge(&mut self, edge: (usize, usize), value: Option<f64>) {
        let (row, col) = edge;
        let n = self.size();
        assert!(row < n, "The first node index is out of range.");
        assert!(col < n, "The second node index is out of range.");
        self.weights[n * row + col] = value;
    }
}
impl GraphProperties for Digraph {
    // All property queries (loop-freeness, completeness, symmetry, ...) are
    // provided by the trait's default methods; no overrides are needed.
}
impl GraphAlgorithms<Digraph> for Digraph {
    /// Builds the reweighted graph used by Johnson's algorithm, returning it
    /// together with the node potentials ("heights").
    fn johnson_graph(&self) -> Result<(Digraph, Vec<f64>), GraphErrors> {
        let size = self.size();
        let mut eweights: Vec<Option<f64>> = self.weights.clone();
        // Grow the matrix to (size+1) x (size+1): inserting a `None` at the
        // end of each row adds a new last column (no edges into the new node)...
        for i in 0..size {
            eweights.insert(size * (i + 1) + i, None);
        }
        // ...and appending a row of 0-weight edges connects the new virtual
        // node to every original node (plus its own diagonal cell).
        eweights.extend(vec![Some(0.0f64); size + 1].into_iter());
        let egraph = Digraph::from_vec(&eweights);
        // Bellman-Ford from the virtual node (index `size`) yields the
        // potentials; it also detects negative cycles, which are propagated
        // as an error. Every node is reachable from the virtual node, so
        // `unwrap()` on the distances cannot fail here.
        let heights: Vec<f64> = match egraph.bellman_ford(size) {
            Ok(m) => { m.0.iter().map(|&x| x.unwrap()).collect() }
            Err(f) => { return Err(f); }
        };
        // Reweight every edge (u, v): w'(u,v) = w(u,v) + h(u) - h(v),
        // which makes all weights non-negative while preserving shortest paths.
        let nweights = self.weights.iter().enumerate().map(|(i, &x)| match x {
            Some(f) => Some(f + heights[i / size] - heights[i % size]),
            None => None,
        }).collect();
        Ok((Digraph::from_vec(&nweights), heights))
    }
}
impl DirectedGraphAlgorithms for Digraph {
    // Entirely provided by the trait's default methods.
}
// TESTS
#[cfg(test)]
mod test {
    use traits::{BasicGraphMethods, GraphProperties, GraphErrors,
                 GraphAlgorithms, DirectedGraphAlgorithms};
    // Round-trips edges through get/set and checks the derived graph
    // properties before and after mutating the matrix.
    #[test]
    fn test_graph_construction_1() {
        let w1 = vec![
            None, Some(-1.0f64),
            Some(4.0f64), None
        ];
        let mut g = super::Digraph::from_vec(&w1);
        let edges1 = vec![
            g.get_edge((0, 0)), g.get_edge((0, 1)),
            g.get_edge((1, 0)), g.get_edge((1, 1))
        ];
        let set_0: Vec<usize> = vec![0];
        let set_1: Vec<usize> = vec![1];
        assert_eq!(edges1, w1);
        assert_eq!(g.size(), 2);
        assert_eq!(g.is_loopfree(), true);
        assert_eq!(g.is_nonnegative(), false);
        assert_eq!(g.is_complete(), true);
        assert_eq!(g.is_symmetric(), false);
        assert_eq!(g.predecessors(0), set_1);
        assert_eq!(g.predecessors(1), set_0);
        assert_eq!(g.successors(0), set_1);
        assert_eq!(g.successors(1), set_0);
        let w2 = vec![
            Some(1.0f64), Some(2.0f64),
            Some(3.0f64), Some(4.0f64),
        ];
        g.set_edge((0, 0), w2[0]);
        g.set_edge((0, 1), w2[1]);
        g.set_edge((1, 0), w2[2]);
        g.set_edge((1, 1), w2[3]);
        let edges2 = vec![
            g.get_edge((0, 0)), g.get_edge((0, 1)),
            g.get_edge((1, 0)), g.get_edge((1, 1))
        ];
        let set_01: Vec<usize> = vec![0, 1];
        assert_eq!(edges2, w2);
        assert_eq!(g.size(), 2);
        assert_eq!(g.is_loopfree(), false);
        assert_eq!(g.is_nonnegative(), true);
        assert_eq!(g.is_complete(), true);
        assert_eq!(g.is_symmetric(), false);
        assert_eq!(g.predecessors(0), set_01);
        assert_eq!(g.predecessors(1), set_01);
        assert_eq!(g.successors(0), set_01);
        assert_eq!(g.successors(1), set_01);
        // Making (0,1) equal to (1,0) turns the digraph symmetric.
        g.set_edge((0, 1), w2[2]);
        assert_eq!(g.is_symmetric(), true);
    }
    // A non-square weights vector must be rejected by `from_vec`.
    #[test]
    #[should_panic]
    fn test_graph_construction_2() {
        let w: Vec<Option<f64>> = vec![None, Some(1.0f64)];
        super::Digraph::from_vec(&w);
    }
    // The empty graph is valid and round-trips through `to_vec`.
    #[test]
    fn test_graph_construction_3() {
        let g = super::Digraph::new(0);
        let w: Vec<Option<f64>> = vec![];
        assert_eq!(g.size(), 0);
        assert_eq!(g.to_vec(), w);
    }
    // Any edge access on the empty graph is out of range.
    #[test]
    #[should_panic]
    fn test_graph_construction_4() {
        let g = super::Digraph::new(0);
        g.get_edge((0, 0));
    }
    // From Wikipedia:
    // https://en.wikipedia.org/w/index.php?
    // title=Floyd%E2%80%93Warshall_algorithm&oldid=667601616
    #[test]
    fn test_shortest_paths_1() {
        let g = super::Digraph::from_vec(&vec![
            None, None, Some(-2.0f64), None,
            Some(4.0f64), None, Some(3.0f64), None,
            None, None, None, Some(2.0f64),
            None, Some(-1.0f64), None, None
        ]);
        let d: Result<(Vec<Option<f64>>, Vec<Option<usize>>), GraphErrors>
            = Ok((vec![Some(0.0f64), Some(-1.0f64), Some(-2.0f64),
                       Some(0.0f64)],
                  vec![None, Some(3), Some(0), Some(2)]));
        // Dijkstra must refuse the negative edge; Johnson handles it.
        assert_eq!(g.dijkstra(0), Err(GraphErrors::ContainsNegativeEdge));
        assert_eq!(g.johnson(0), d);
    }
    #[test]
    fn test_shortest_paths_2() {
        let g = super::Digraph::from_vec(&vec![
            None, Some(2.0f64), Some(3.0f64), Some(1.0f64), None, None,
            None, None, None, None, Some(4.0f64), None,
            None, None, None, None, None, Some(7.0f64),
            None, None, Some(8.0f64), None, Some(6.0f64), Some(5.0f64),
            None, None, None, None, None, Some(5.0f64),
            Some(3.0f64), None, None, None, None, None
        ]);
        let d: Vec<Result<(Vec<Option<f64>>, Vec<Option<usize>>),
                          GraphErrors>> = vec![
            Ok((vec![Some(0.0f64), Some(2.0f64), Some(3.0f64),
                     Some(1.0f64), Some(6.0f64), Some(6.0f64)],
                vec![None, Some(0), Some(0), Some(0), Some(1), Some(3)])),
            Ok((vec![Some(12.0f64), Some(0.0f64), Some(15.0f64),
                     Some(13.0f64), Some(4.0f64), Some(9.0f64)],
                vec![Some(5), None, Some(0), Some(0), Some(1), Some(4)])),
            Ok((vec![Some(10.0f64), Some(12.0f64), Some(0.0f64),
                     Some(11.0f64), Some(16.0f64), Some(7.0f64)],
                vec![Some(5), Some(0), None, Some(0), Some(1), Some(2)])),
            Ok((vec![Some(8.0f64), Some(10.0f64), Some(8.0f64),
                     Some(0.0f64), Some(6.0f64), Some(5.0f64)],
                vec![Some(5), Some(0), Some(3), None, Some(3), Some(3)])),
            Ok((vec![Some(8.0f64), Some(10.0f64), Some(11.0f64),
                     Some(9.0f64), Some(0.0f64), Some(5.0f64)],
                vec![Some(5), Some(0), Some(0), Some(0), None, Some(4)])),
            Ok((vec![Some(3.0f64), Some(5.0f64), Some(6.0f64),
                     Some(4.0f64), Some(9.0f64), Some(0.0f64)],
                vec![Some(5), Some(0), Some(0), Some(0), Some(1), None]))
        ];
        // Non-negative weights: Dijkstra and Johnson must agree everywhere.
        assert_eq!(g.dijkstra_all(), d);
        assert_eq!(g.johnson_all(), d);
    }
    // From Wikipedia:
    // https://en.wikipedia.org/w/index.php?
    // title=Floyd%E2%80%93Warshall_algorithm&oldid=667601616
    #[test]
    fn test_shortest_paths_3() {
        let g = super::Digraph::from_vec(&vec![
            None, None, Some(-2.0f64), None,
            Some(4.0f64), None, Some(3.0f64), None,
            None, None, None, Some(2.0f64),
            None, Some(-1.0f64), None, None
        ]);
        let d: Vec<Result<(Vec<Option<f64>>, Vec<Option<usize>>),
                          GraphErrors>> = vec![
            Ok((vec![Some(0.0f64), Some(-1.0f64), Some(-2.0f64),
                     Some(0.0f64)],
                vec![None, Some(3), Some(0), Some(2)])),
            Ok((vec![Some(4.0f64), Some(0.0f64), Some(2.0f64), Some(4.0f64)],
                vec![Some(1), None, Some(0), Some(2)])),
            Ok((vec![Some(5.0f64), Some(1.0f64), Some(0.0f64), Some(2.0f64)],
                vec![Some(1), Some(3), None, Some(2)])),
            Ok((vec![Some(3.0f64), Some(-1.0f64), Some(1.0f64), Some(0.0f64)],
                vec![Some(1), Some(3), Some(0), None]))
        ];
        // Negative edges but no negative cycle: Bellman-Ford and Johnson agree.
        assert_eq!(g.bellman_ford_all(), d);
        assert_eq!(g.johnson_all(), d);
    }
    // From Wikipedia:
    // https://en.wikipedia.org/w/index.php?
    // title=Floyd%E2%80%93Warshall_algorithm&oldid=667601616
    #[test]
    fn test_shortest_paths_4() {
        let g = super::Digraph::from_vec(&vec![
            None, None, Some(-2.0f64), None,
            Some(4.0f64), None, Some(3.0f64), None,
            None, None, None, Some(2.0f64),
            None, Some(-1.0f64), None, None
        ]);
        let d = vec![
            vec![Some(0.0f64), Some(-1.0f64), Some(-2.0f64), Some(0.0f64)],
            vec![Some(4.0f64), Some(0.0f64), Some(2.0f64), Some(4.0f64)],
            vec![Some(5.0f64), Some(1.0f64), Some(0.0f64), Some(2.0f64)],
            vec![Some(3.0f64), Some(-1.0f64), Some(1.0f64), Some(0.0f64)]
        ];
        let p = vec![
            vec![None, Some(3), Some(0), Some(2)],
            vec![Some(1), None, Some(0), Some(2)],
            vec![Some(1), Some(3), None, Some(2)],
            vec![Some(1), Some(3), Some(0), None]
        ];
        assert_eq!(g.floyd_warshall(), (d, p));
    }
    // negative cycle
    #[test]
    fn test_shortest_paths_5() {
        let g = super::Digraph::from_vec(&vec![
            None, Some(1.0f64), None,
            None, None, Some(2.0f64),
            Some(-4.0f64), None, None
        ]);
        // Both algorithms must report the negative cycle instead of looping.
        assert_eq!(g.bellman_ford(0),
                   Err(GraphErrors::ContainsNegativeCycle));
        assert_eq!(g.johnson(0), Err(GraphErrors::ContainsNegativeCycle));
    }
    // Three strongly connected components: {0,1,2}, {3,4,5}, {6}.
    #[test]
    fn test_components_1() {
        let g = super::Digraph::from_vec(&vec![
            None, Some(1.0f64), None, None, None, None, Some(1.0f64),
            None, None, Some(1.0f64), None, None, None, None,
            Some(1.0f64), None, None, None, None, None, None,
            None, None, Some(1.0f64), None, Some(1.0f64), None, None,
            None, None, None, None, None, Some(1.0f64), None,
            Some(1.0f64), None, None, Some(1.0f64), Some(1.0f64), None, None,
            None, None, None, None, None, None, Some(1.0f64)
        ]);
        let c0: Vec<usize> = vec![0, 1, 2];
        let c1: Vec<usize> = vec![3, 4, 5];
        let c2: Vec<usize> = vec![6];
        assert_eq!(g.sc_components(), vec![c0, c1, c2]);
    }
}
| 37.490741 | 78 | 0.532395 |
bf7093d2e8071935fd8f995756ac8cfba66f2859 | 659 | use pinwheel::prelude::*;
// Image component with an expandable full-size viewer; both attributes are
// optional and wired up through pinwheel's `builder`/`new` derives.
#[derive(builder, Default, new)]
#[new(default)]
pub struct Img {
	/// Alternative text for the `<img>` element.
	#[builder]
	pub alt: Option<String>,
	/// Image source URL.
	#[builder]
	pub src: Option<String>,
}
impl Component for Img {
	// Renders a <details> element: the <summary> shows the thumbnail image,
	// and expanding it reveals a second, viewer-sized copy of the same image.
	fn into_node(self) -> Node {
		details()
			.class("image-details")
			.child(
				summary().class("image-details-summary").child(
					img()
						.class("image-img")
						// Cloned because the same alt/src are reused below.
						.attribute("alt", self.alt.clone())
						.attribute("src", self.src.clone()),
				),
			)
			.child(
				div().class("image-viewer").child(
					img()
						.class("image-viewer-img")
						.attribute("alt", self.alt)
						.attribute("src", self.src),
				),
			)
			.into_node()
	}
}
| 18.828571 | 51 | 0.573596 |
0801d1b58169d97be917037909aeecd6db5f43bc | 1,678 | use std::cmp::{Eq, Ord, Ordering, PartialOrd};
use std::convert::From;
/// Position of a node; internal nodes hold these as child pointers.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct Offset(pub usize);
/// Lookup key; ordering is the inner `String`'s lexicographic order.
#[derive(Clone, Eq, PartialEq, PartialOrd, Ord, Debug)]
pub struct Key(pub String);
/// A key/value entry stored in a leaf node.
///
/// Ordering is primarily by `key`, with `value` as a tie breaker. The tie
/// breaker is required so that the manual `Ord` stays consistent with
/// `PartialEq` / the derived `Eq`: `a == b` must hold exactly when
/// `a.cmp(&b) == Ordering::Equal`.
#[derive(Clone, Eq, Debug)]
pub struct KeyValuePair {
    pub key: String,
    pub value: String,
}

impl Ord for KeyValuePair {
    fn cmp(&self, other: &Self) -> Ordering {
        // Compare by key first; fall back to the value so the total order
        // agrees with equality. (The previous key-only comparison reported
        // `Equal` for pairs that `PartialEq` considered different, which
        // violates the `Ord` contract.)
        self.key
            .cmp(&other.key)
            .then_with(|| self.value.cmp(&other.value))
    }
}

impl PartialOrd for KeyValuePair {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl PartialEq for KeyValuePair {
    fn eq(&self, other: &Self) -> bool {
        self.key == other.key && self.value == other.value
    }
}

impl KeyValuePair {
    /// Creates a new key/value pair.
    pub fn new(key: String, value: String) -> KeyValuePair {
        KeyValuePair { key, value }
    }
}
// NodeType represents different node types in the BTree
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum NodeType {
    // Internal nodes contain a vector of pointers to their children and a vector of keys
    Internal(Vec<Offset>, Vec<Key>),
    // Leaf nodes contain a vector of keys and values
    Leaf(Vec<KeyValuePair>),
    // Sentinel produced by `From<u8>` for an unrecognized tag byte.
    Unexpected,
}
// Decodes a `NodeType` tag byte: `0x01` = internal node, `0x02` = leaf,
// and any other value becomes the `Unexpected` sentinel.
impl From<u8> for NodeType {
    fn from(orig: u8) -> NodeType {
        if orig == 0x01 {
            NodeType::Internal(Vec::new(), Vec::new())
        } else if orig == 0x02 {
            NodeType::Leaf(Vec::new())
        } else {
            NodeType::Unexpected
        }
    }
}
// Encodes a `NodeType` as its tag byte — the inverse of `From<u8>`, with
// `Unexpected` mapped to `0x03`.
impl From<&NodeType> for u8 {
    fn from(orig: &NodeType) -> u8 {
        let tag: u8 = match orig {
            NodeType::Internal(_, _) => 0x01,
            NodeType::Leaf(_) => 0x02,
            NodeType::Unexpected => 0x03,
        };
        tag
    }
}
| 22.986301 | 87 | 0.646007 |
efc547383e5768593d47c5d88a92291a04a304fc | 24,277 | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
parser::ast::{
BinOp, Field, FunctionName, FunctionVisibility, Kind, ModuleIdent, PragmaProperty,
ResourceLoc, SpecApplyPattern, SpecBlockTarget, SpecConditionKind, StructName, UnaryOp,
Value, Var,
},
shared::{ast_debug::*, unique_map::UniqueMap, *},
};
use move_ir_types::location::*;
use std::{
collections::{BTreeMap, BTreeSet, VecDeque},
fmt,
};
//**************************************************************************************************
// Program
//**************************************************************************************************
/// AST root for this compiler phase: every module plus every script.
#[derive(Debug)]
pub struct Program {
    /// Modules keyed by identifier; `UniqueMap` enforces unique names.
    pub modules: UniqueMap<ModuleIdent, ModuleDefinition>,
    /// Scripts keyed by their source name.
    pub scripts: BTreeMap<String, Script>,
}
//**************************************************************************************************
// Scripts
//**************************************************************************************************
/// A transaction script: a single entry function plus its spec blocks.
#[derive(Debug)]
pub struct Script {
    pub loc: Loc,
    pub function_name: FunctionName,
    pub function: Function,
    /// Specification blocks attached to the script.
    pub specs: Vec<SpecBlock>,
}
//**************************************************************************************************
// Modules
//**************************************************************************************************
/// A module: its structs, functions, and spec blocks.
#[derive(Debug)]
pub struct ModuleDefinition {
    pub loc: Loc,
    /// `true` when compiled from source (as opposed to a dependency/library).
    pub is_source_module: bool,
    pub structs: UniqueMap<StructName, StructDefinition>,
    pub functions: UniqueMap<FunctionName, Function>,
    pub specs: Vec<SpecBlock>,
}
//**************************************************************************************************
// Structs
//**************************************************************************************************
/// Fields keyed by name; the `usize` is the field's declaration index so the
/// original declaration order can be recovered from the map.
pub type Fields<T> = UniqueMap<Field, (usize, T)>;
#[derive(Debug, PartialEq)]
pub struct StructDefinition {
    pub loc: Loc,
    /// `Some` when the struct was declared as a resource.
    pub resource_opt: ResourceLoc,
    pub type_parameters: Vec<(Name, Kind)>,
    pub fields: StructFields,
}
#[derive(Debug, PartialEq)]
pub enum StructFields {
    /// Fields declared in source.
    Defined(Fields<Type>),
    /// Native struct: no declared fields, just a location.
    Native(Loc),
}
//**************************************************************************************************
// Functions
//**************************************************************************************************
/// Type parameters, value parameters, and return type of a function.
#[derive(PartialEq, Debug)]
pub struct FunctionSignature {
    pub type_parameters: Vec<(Name, Kind)>,
    pub parameters: Vec<(Var, Type)>,
    pub return_type: Type,
}
#[derive(PartialEq, Debug)]
pub enum FunctionBody_ {
    /// A body written in source, as a sequence of items.
    Defined(Sequence),
    /// A native (built-in) function with no source body.
    Native,
}
pub type FunctionBody = Spanned<FunctionBody_>;
/// Identifier for an inline `spec` block inside a function body; keys the
/// `specs` map on `Function` below.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct SpecId(usize);
#[derive(PartialEq, Debug)]
pub struct Function {
    pub loc: Loc,
    pub visibility: FunctionVisibility,
    pub signature: FunctionSignature,
    /// Resources this function may acquire (`acquires` clause).
    pub acquires: Vec<ModuleAccess>,
    pub body: FunctionBody,
    /// Inline spec blocks found in the body, keyed by their `SpecId`.
    pub specs: BTreeMap<SpecId, SpecBlock>,
}
//**************************************************************************************************
// Specification Blocks
//**************************************************************************************************
/// A specification block: what it targets plus its member declarations.
#[derive(Debug, PartialEq)]
pub struct SpecBlock_ {
    pub target: SpecBlockTarget,
    pub members: Vec<SpecBlockMember>,
}
pub type SpecBlock = Spanned<SpecBlock_>;
#[derive(Debug, PartialEq)]
#[allow(clippy::large_enum_variant)]
pub enum SpecBlockMember_ {
    /// A condition (e.g. an assertion kind plus its expression).
    Condition {
        kind: SpecConditionKind,
        exp: Exp,
    },
    /// A helper function defined inside the spec block.
    Function {
        name: FunctionName,
        signature: FunctionSignature,
        body: FunctionBody,
    },
    /// A spec variable, optionally global, with its own type parameters.
    Variable {
        is_global: bool,
        name: Name,
        type_parameters: Vec<(Name, Kind)>,
        type_: Type,
    },
    /// Inclusion of another spec via an expression.
    Include {
        exp: Exp,
    },
    /// Applies a spec expression to the functions matched by `patterns`,
    /// except those matched by `exclusion_patterns`.
    Apply {
        exp: Exp,
        patterns: Vec<SpecApplyPattern>,
        exclusion_patterns: Vec<SpecApplyPattern>,
    },
    /// A `pragma` with its property list.
    Pragma {
        properties: Vec<PragmaProperty>,
    },
}
pub type SpecBlockMember = Spanned<SpecBlockMember_>;
//**************************************************************************************************
// Types
//**************************************************************************************************
/// A name reference: either unqualified or qualified by a module.
#[derive(Debug, PartialEq, Clone)]
pub enum ModuleAccess_ {
    Name(Name),
    ModuleAccess(ModuleIdent, Name),
}
pub type ModuleAccess = Spanned<ModuleAccess_>;
#[derive(Debug, PartialEq)]
#[allow(clippy::large_enum_variant)]
pub enum Type_ {
    /// The unit type `()`.
    Unit,
    /// A tuple of two or more types.
    Multiple(Vec<Type>),
    /// A (possibly generic) named type application.
    Apply(ModuleAccess, Vec<Type>),
    /// A reference; the `bool` is `true` for `&mut`.
    Ref(bool, Box<Type>),
    /// A function type (argument types, result type).
    Fun(Vec<Type>, Box<Type>),
    /// Placeholder produced after a reported resolution error.
    UnresolvedError,
}
pub type Type = Spanned<Type_>;
//**************************************************************************************************
// Expressions
//**************************************************************************************************
/// Left-hand side of a binding or assignment.
#[derive(Debug, PartialEq)]
pub enum LValue_ {
    /// A single variable, with optional explicit type arguments.
    Var(ModuleAccess, Option<Vec<Type>>),
    /// A struct unpack pattern binding each field to a nested lvalue.
    Unpack(ModuleAccess, Option<Vec<Type>>, Fields<LValue>),
}
pub type LValue = Spanned<LValue_>;
pub type LValueList_ = Vec<LValue>;
pub type LValueList = Spanned<LValueList_>;
/// A dotted access chain, e.g. `e.f.g`.
#[derive(Debug, PartialEq)]
#[allow(clippy::large_enum_variant)]
pub enum ExpDotted_ {
    Exp(Exp),
    Dot(Box<ExpDotted>, Name),
}
pub type ExpDotted = Spanned<ExpDotted_>;
/// Expression forms for this phase; `spec only` variants exist solely
/// inside specification blocks.
#[derive(Debug, PartialEq)]
#[allow(clippy::large_enum_variant)]
pub enum Exp_ {
    Value(Value),
    /// A numeric literal whose width is still to be inferred.
    InferredNum(u128),
    Move(Var),
    Copy(Var),
    Name(ModuleAccess, Option<Vec<Type>>),
    GlobalCall(Name, Option<Vec<Type>>, Spanned<Vec<Exp>>),
    Call(ModuleAccess, Option<Vec<Type>>, Spanned<Vec<Exp>>),
    Pack(ModuleAccess, Option<Vec<Type>>, Fields<Exp>),
    IfElse(Box<Exp>, Box<Exp>, Box<Exp>),
    While(Box<Exp>, Box<Exp>),
    Loop(Box<Exp>),
    Block(Sequence),
    Lambda(LValueList, Box<Exp>), // spec only
    Assign(LValueList, Box<Exp>),
    /// Assignment through a field path, e.g. `e.f = x`.
    FieldMutate(Box<ExpDotted>, Box<Exp>),
    /// Assignment through a reference, e.g. `*e = x`.
    Mutate(Box<Exp>, Box<Exp>),
    Return(Box<Exp>),
    Abort(Box<Exp>),
    Break,
    Continue,
    Dereference(Box<Exp>),
    UnaryExp(UnaryOp, Box<Exp>),
    BinopExp(Box<Exp>, BinOp, Box<Exp>),
    ExpList(Vec<Exp>),
    Unit,
    /// Borrow; the `bool` is `true` for a mutable borrow.
    Borrow(bool, Box<Exp>),
    ExpDotted(Box<ExpDotted>),
    Index(Box<Exp>, Box<Exp>), // spec only (no mutation needed right now)
    Cast(Box<Exp>, Type),
    Annotate(Box<Exp>, Type),
    /// An inline spec block: its id plus the variables it captures.
    Spec(SpecId, BTreeSet<Name>),
    /// Placeholder produced after a reported resolution error.
    UnresolvedError,
}
pub type Exp = Spanned<Exp_>;
pub type Sequence = VecDeque<SequenceItem>;
/// One item in a block sequence.
#[derive(Debug, PartialEq)]
pub enum SequenceItem_ {
    /// A bare expression statement.
    Seq(Exp),
    /// `let` with no initializer (optional type annotation).
    Declare(LValueList, Option<Type>),
    /// `let` with an initializer.
    Bind(LValueList, Exp),
}
pub type SequenceItem = Spanned<SequenceItem_>;
//**************************************************************************************************
// impls
//**************************************************************************************************
impl SpecId {
    /// Wraps a raw index as a `SpecId`.
    pub fn new(u: usize) -> Self {
        SpecId(u)
    }
    /// Returns the underlying index.
    pub fn inner(self) -> usize {
        self.0
    }
}
//**************************************************************************************************
// Display
//**************************************************************************************************
impl fmt::Display for ModuleAccess_ {
fn fmt(&self, f: &mut fmt::Formatter) -> std::fmt::Result {
use ModuleAccess_::*;
match self {
Name(n) => write!(f, "{}", n),
ModuleAccess(m, n) => write!(f, "{}::{}", m, n),
}
}
}
impl fmt::Display for Type_ {
    fn fmt(&self, f: &mut fmt::Formatter) -> std::fmt::Result {
        use Type_::*;
        match self {
            // Error placeholder renders as a wildcard.
            UnresolvedError => write!(f, "_"),
            // Named type, with `<...>` type arguments only when present.
            Apply(n, tys) => {
                write!(f, "{}", n)?;
                if !tys.is_empty() {
                    write!(f, "<")?;
                    write!(f, "{}", format_comma(tys))?;
                    write!(f, ">")?;
                }
                Ok(())
            }
            Ref(mut_, ty) => write!(f, "&{}{}", if *mut_ { "mut " } else { "" }, ty),
            Fun(args, result) => write!(f, "({}):{}", format_comma(args), result),
            Unit => write!(f, "()"),
            // Tuple type: parenthesized, comma-separated.
            Multiple(tys) => {
                write!(f, "(")?;
                write!(f, "{}", format_comma(tys))?;
                write!(f, ")")
            }
        }
    }
}
impl fmt::Display for SpecId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.0)
}
}
//**************************************************************************************************
// Debug
//**************************************************************************************************
impl AstDebug for Program {
    /// Prints each module, then each script, as an indented block.
    fn ast_debug(&self, w: &mut AstWriter) {
        let Program { modules, scripts } = self;
        for (m, mdef) in modules {
            w.write(&format!("module {}", m));
            w.block(|w| mdef.ast_debug(w));
            w.new_line();
        }
        for (n, s) in scripts {
            w.write(&format!("script {}", n));
            w.block(|w| s.ast_debug(w));
            w.new_line()
        }
    }
}
impl AstDebug for Script {
    /// Prints the entry function followed by the script's spec blocks.
    fn ast_debug(&self, w: &mut AstWriter) {
        let Script {
            loc: _loc,
            function_name,
            function,
            specs,
        } = self;
        (function_name.clone(), function).ast_debug(w);
        for spec in specs {
            spec.ast_debug(w);
            w.new_line();
        }
    }
}
impl AstDebug for ModuleDefinition {
    /// Prints the module header, then structs, functions, and spec blocks.
    fn ast_debug(&self, w: &mut AstWriter) {
        let ModuleDefinition {
            loc: _loc,
            is_source_module,
            structs,
            functions,
            specs,
        } = self;
        w.writeln(if *is_source_module {
            "source module"
        } else {
            "library module"
        });
        for sdef in structs {
            sdef.ast_debug(w);
            w.new_line();
        }
        for fdef in functions {
            fdef.ast_debug(w);
            w.new_line();
        }
        for spec in specs {
            spec.ast_debug(w);
            w.new_line();
        }
    }
}
impl AstDebug for (StructName, &StructDefinition) {
    /// Prints `[native] [resource] struct Name<T...>` and, for defined
    /// structs, the fields as `declIndex#name: type`.
    fn ast_debug(&self, w: &mut AstWriter) {
        let (
            name,
            StructDefinition {
                loc: _loc,
                resource_opt,
                type_parameters,
                fields,
            },
        ) = self;
        if let StructFields::Native(_) = fields {
            w.write("native ");
        }
        if resource_opt.is_some() {
            w.write("resource ");
        }
        w.write(&format!("struct {}", name));
        type_parameters.ast_debug(w);
        if let StructFields::Defined(fields) = fields {
            w.block(|w| {
                w.list(fields, ",", |w, (f, idx_st)| {
                    let (idx, st) = idx_st;
                    // `idx` is the field's declaration index.
                    w.write(&format!("{}#{}: ", idx, f));
                    st.ast_debug(w);
                    true
                });
            })
        }
    }
}
impl AstDebug for SpecBlock_ {
    /// Prints ` spec <target>{ member; member; ... }`.
    fn ast_debug(&self, w: &mut AstWriter) {
        w.write(" spec ");
        self.target.ast_debug(w);
        w.write("{");
        w.semicolon(&self.members, |w, m| m.ast_debug(w));
        w.write("}");
    }
}
impl AstDebug for SpecBlockMember_ {
    /// Prints one spec-block member in roughly its source syntax.
    fn ast_debug(&self, w: &mut AstWriter) {
        match self {
            // `<kind> <exp>`
            SpecBlockMember_::Condition { kind, exp } => {
                kind.ast_debug(w);
                exp.ast_debug(w);
            }
            // `[native] define name<T...>(...)...` plus body or `;`
            SpecBlockMember_::Function {
                signature,
                name,
                body,
            } => {
                if let FunctionBody_::Native = &body.value {
                    w.write("native ");
                }
                w.write("define ");
                w.write(&format!("{}", name));
                signature.ast_debug(w);
                match &body.value {
                    FunctionBody_::Defined(body) => w.block(|w| body.ast_debug(w)),
                    FunctionBody_::Native => w.writeln(";"),
                }
            }
            // `global|local name<T...>: type`
            SpecBlockMember_::Variable {
                is_global,
                name,
                type_parameters,
                type_,
            } => {
                if *is_global {
                    w.write("global ");
                } else {
                    w.write("local");
                }
                w.write(&format!("{}", name));
                type_parameters.ast_debug(w);
                w.write(": ");
                type_.ast_debug(w);
            }
            // `include <exp>`
            SpecBlockMember_::Include { exp } => {
                w.write("include ");
                exp.ast_debug(w);
            }
            // `apply <exp> to <patterns> [exclude <patterns>]`
            SpecBlockMember_::Apply {
                exp,
                patterns,
                exclusion_patterns,
            } => {
                w.write("apply ");
                exp.ast_debug(w);
                w.write(" to ");
                w.list(patterns, ", ", |w, p| {
                    p.ast_debug(w);
                    true
                });
                if !exclusion_patterns.is_empty() {
                    w.write(" exclude ");
                    w.list(exclusion_patterns, ", ", |w, p| {
                        p.ast_debug(w);
                        true
                    });
                }
            }
            // `pragma p1, p2, ...`
            SpecBlockMember_::Pragma { properties } => {
                w.write("pragma ");
                w.list(properties, ", ", |w, p| {
                    p.ast_debug(w);
                    true
                });
            }
        }
    }
}
// Debug-prints a function definition: visibility, optional `native`,
// name, signature, acquires list, then the body (or `;` for native).
impl AstDebug for (FunctionName, &Function) {
    fn ast_debug(&self, w: &mut AstWriter) {
        let (
            name,
            Function {
                loc: _loc,
                visibility,
                signature,
                acquires,
                body,
                ..
            },
        ) = self;
        visibility.ast_debug(w);
        if let FunctionBody_::Native = &body.value {
            w.write("native ");
        }
        w.write(&format!("{}", name));
        signature.ast_debug(w);
        if !acquires.is_empty() {
            w.write(" acquires ");
            w.comma(acquires, |w, m| m.ast_debug(w));
            w.write(" ");
        }
        match &body.value {
            FunctionBody_::Defined(body) => w.block(|w| body.ast_debug(w)),
            FunctionBody_::Native => w.writeln(";"),
        }
    }
}
// Debug-prints `<TyParams>(name: ty, ...): return_ty`.
impl AstDebug for FunctionSignature {
    fn ast_debug(&self, w: &mut AstWriter) {
        let FunctionSignature {
            type_parameters,
            parameters,
            return_type,
        } = self;
        type_parameters.ast_debug(w);
        w.write("(");
        w.comma(parameters, |w, (v, st)| {
            w.write(&format!("{}: ", v));
            st.ast_debug(w);
        });
        w.write("): ");
        return_type.ast_debug(w)
    }
}
// Debug-prints a type expression.
impl AstDebug for Type_ {
    fn ast_debug(&self, w: &mut AstWriter) {
        match self {
            Type_::Unit => w.write("()"),
            // Tuple type: `(t1, t2, ...)`.
            Type_::Multiple(ss) => {
                w.write("(");
                ss.ast_debug(w);
                w.write(")")
            }
            // Named type application; type arguments only when present.
            Type_::Apply(m, ss) => {
                m.ast_debug(w);
                if !ss.is_empty() {
                    w.write("<");
                    ss.ast_debug(w);
                    w.write(">");
                }
            }
            Type_::Ref(mut_, s) => {
                w.write("&");
                if *mut_ {
                    w.write("mut ");
                }
                s.ast_debug(w)
            }
            Type_::Fun(args, result) => {
                w.write("(");
                w.comma(args, |w, ty| ty.ast_debug(w));
                w.write("):");
                result.ast_debug(w);
            }
            // Placeholder printed for types that failed to resolve.
            Type_::UnresolvedError => w.write("_|_"),
        }
    }
}
// Comma-separated type list (used for type arguments and tuples).
impl AstDebug for Vec<Type> {
    fn ast_debug(&self, w: &mut AstWriter) {
        w.comma(self, |w, s| s.ast_debug(w))
    }
}
// Debug-prints a name reference: either a bare name or `Module::Name`.
impl AstDebug for ModuleAccess_ {
    fn ast_debug(&self, w: &mut AstWriter) {
        w.write(&match self {
            ModuleAccess_::Name(n) => format!("{}", n),
            ModuleAccess_::ModuleAccess(m, n) => format!("{}::{}", m, n),
        })
    }
}
// Debug-prints a statement sequence, separating items with `;`.
impl AstDebug for VecDeque<SequenceItem> {
    fn ast_debug(&self, w: &mut AstWriter) {
        w.semicolon(self, |w, item| item.ast_debug(w))
    }
}
// Debug-prints one sequence item: a bare expression, a `let` declaration,
// or a `let ... = ...` binding.
impl AstDebug for SequenceItem_ {
    fn ast_debug(&self, w: &mut AstWriter) {
        use SequenceItem_ as I;
        match self {
            I::Seq(e) => e.ast_debug(w),
            // `let bs[: ty];` — declaration without an initializer.
            I::Declare(sp!(_, bs), ty_opt) => {
                w.write("let ");
                bs.ast_debug(w);
                if let Some(ty) = ty_opt {
                    ty.ast_debug(w)
                }
            }
            // `let bs = e;` — declaration with an initializer.
            I::Bind(sp!(_, bs), e) => {
                w.write("let ");
                bs.ast_debug(w);
                w.write(" = ");
                e.ast_debug(w);
            }
        }
    }
}
// Debug-prints an expression. One arm per `Exp_` variant; the output is a
// readable pseudo-Move rendering intended for compiler debugging, not for
// re-parsing.
impl AstDebug for Exp_ {
    fn ast_debug(&self, w: &mut AstWriter) {
        use Exp_ as E;
        match self {
            E::Unit => w.write("()"),
            E::InferredNum(u) => w.write(&format!("{}", u)),
            E::Value(v) => v.ast_debug(w),
            E::Move(v) => w.write(&format!("move {}", v)),
            E::Copy(v) => w.write(&format!("copy {}", v)),
            // Name use, with optional explicit type arguments `<...>`.
            E::Name(ma, tys_opt) => {
                ma.ast_debug(w);
                if let Some(ss) = tys_opt {
                    w.write("<");
                    ss.ast_debug(w);
                    w.write(">");
                }
            }
            // Call rooted at the global scope: `::f<...>(args)`.
            E::GlobalCall(n, tys_opt, sp!(_, rhs)) => {
                w.write(&format!("::{}", n));
                if let Some(ss) = tys_opt {
                    w.write("<");
                    ss.ast_debug(w);
                    w.write(">");
                }
                w.write("(");
                w.comma(rhs, |w, e| e.ast_debug(w));
                w.write(")");
            }
            E::Call(ma, tys_opt, sp!(_, rhs)) => {
                ma.ast_debug(w);
                if let Some(ss) = tys_opt {
                    w.write("<");
                    ss.ast_debug(w);
                    w.write(">");
                }
                w.write("(");
                w.comma(rhs, |w, e| e.ast_debug(w));
                w.write(")");
            }
            // Struct pack: fields rendered as `idx#field: exp`.
            E::Pack(ma, tys_opt, fields) => {
                ma.ast_debug(w);
                if let Some(ss) = tys_opt {
                    w.write("<");
                    ss.ast_debug(w);
                    w.write(">");
                }
                w.write("{");
                w.comma(fields, |w, (f, idx_e)| {
                    let (idx, e) = idx_e;
                    w.write(&format!("{}#{}: ", idx, f));
                    e.ast_debug(w);
                });
                w.write("}");
            }
            E::IfElse(b, t, f) => {
                w.write("if (");
                b.ast_debug(w);
                w.write(") ");
                t.ast_debug(w);
                w.write(" else ");
                f.ast_debug(w);
            }
            E::While(b, e) => {
                w.write("while (");
                b.ast_debug(w);
                w.write(")");
                e.ast_debug(w);
            }
            E::Loop(e) => {
                w.write("loop ");
                e.ast_debug(w);
            }
            E::Block(seq) => w.block(|w| seq.ast_debug(w)),
            E::Lambda(sp!(_, bs), e) => {
                w.write("fun ");
                bs.ast_debug(w);
                w.write(" ");
                e.ast_debug(w);
            }
            E::ExpList(es) => {
                w.write("(");
                w.comma(es, |w, e| e.ast_debug(w));
                w.write(")");
            }
            E::Assign(sp!(_, lvalues), rhs) => {
                lvalues.ast_debug(w);
                w.write(" = ");
                rhs.ast_debug(w);
            }
            E::FieldMutate(ed, rhs) => {
                ed.ast_debug(w);
                w.write(" = ");
                rhs.ast_debug(w);
            }
            // Write through a reference: `*lhs = rhs`.
            E::Mutate(lhs, rhs) => {
                w.write("*");
                lhs.ast_debug(w);
                w.write(" = ");
                rhs.ast_debug(w);
            }
            E::Return(e) => {
                w.write("return ");
                e.ast_debug(w);
            }
            E::Abort(e) => {
                w.write("abort ");
                e.ast_debug(w);
            }
            E::Break => w.write("break"),
            E::Continue => w.write("continue"),
            E::Dereference(e) => {
                w.write("*");
                e.ast_debug(w)
            }
            E::UnaryExp(op, e) => {
                op.ast_debug(w);
                w.write(" ");
                e.ast_debug(w);
            }
            E::BinopExp(l, op, r) => {
                l.ast_debug(w);
                w.write(" ");
                op.ast_debug(w);
                w.write(" ");
                r.ast_debug(w)
            }
            E::Borrow(mut_, e) => {
                w.write("&");
                if *mut_ {
                    w.write("mut ");
                }
                e.ast_debug(w);
            }
            E::ExpDotted(ed) => ed.ast_debug(w),
            E::Cast(e, ty) => {
                w.write("(");
                e.ast_debug(w);
                w.write(" as ");
                ty.ast_debug(w);
                w.write(")");
            }
            E::Index(oper, index) => {
                oper.ast_debug(w);
                w.write("[");
                index.ast_debug(w);
                w.write("]");
            }
            // Type ascription: `(e: ty)`.
            E::Annotate(e, ty) => {
                w.write("(");
                e.ast_debug(w);
                w.write(": ");
                ty.ast_debug(w);
                w.write(")");
            }
            // Inline spec block reference, plus any names it captures.
            E::Spec(u, unbound_names) => {
                w.write(&format!("spec #{}", u));
                if !unbound_names.is_empty() {
                    w.write("uses [");
                    w.comma(unbound_names, |w, n| w.write(&format!("{}", n)));
                    w.write("]");
                }
            }
            E::UnresolvedError => w.write("_|_"),
        }
    }
}
// Debug-prints a dotted access chain: a base expression followed by
// zero or more `.field` selections.
impl AstDebug for ExpDotted_ {
    fn ast_debug(&self, w: &mut AstWriter) {
        use ExpDotted_ as D;
        match self {
            D::Exp(e) => e.ast_debug(w),
            D::Dot(e, n) => {
                e.ast_debug(w);
                w.write(&format!(".{}", n))
            }
        }
    }
}
// Debug-prints an lvalue list; parenthesized only when it is not a
// single element (i.e. a tuple-style assignment target).
impl AstDebug for Vec<LValue> {
    fn ast_debug(&self, w: &mut AstWriter) {
        let parens = self.len() != 1;
        if parens {
            w.write("(");
        }
        w.comma(self, |w, b| b.ast_debug(w));
        if parens {
            w.write(")");
        }
    }
}
// Debug-prints a single lvalue: either a variable binding or a struct
// unpack pattern, each with optional explicit type arguments.
impl AstDebug for LValue_ {
    fn ast_debug(&self, w: &mut AstWriter) {
        use LValue_ as L;
        match self {
            L::Var(v, tys_opt) => {
                w.write(&format!("{}", v));
                if let Some(ss) = tys_opt {
                    w.write("<");
                    ss.ast_debug(w);
                    w.write(">");
                }
            }
            L::Unpack(ma, tys_opt, fields) => {
                ma.ast_debug(w);
                if let Some(ss) = tys_opt {
                    w.write("<");
                    ss.ast_debug(w);
                    w.write(">");
                }
                w.write("{");
                w.comma(fields, |w, (f, idx_b)| {
                    let (idx, b) = idx_b;
                    w.write(&format!("{}#{}: ", idx, f));
                    b.ast_debug(w);
                });
                w.write("}");
            }
        }
    }
}
| 28.730178 | 100 | 0.395477 |
acc810f6bcf6a2d4c399fc1334d4982f1df7a2ee | 2,385 | // This file is part of the SORA network and Polkaswap app.
// Copyright (c) 2020, 2021, Polka Biome Ltd. All rights reserved.
// SPDX-License-Identifier: BSD-4-Clause
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
// Redistributions of source code must retain the above copyright notice, this list
// of conditions and the following disclaimer.
// Redistributions in binary form must reproduce the above copyright notice, this
// list of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// All advertising materials mentioning features or use of this software must display
// the following acknowledgement: This product includes software developed by Polka Biome
// Ltd., SORA, and Polkaswap.
//
// Neither the name of the Polka Biome Ltd. nor the names of its contributors may be used
// to endorse or promote products derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY Polka Biome Ltd. AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Polka Biome Ltd. BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use secp256k1::{Message, PublicKey};
use sp_core::H160;
use sp_io::hashing::keccak_256;
pub type EthereumAddress = H160;
/// Derives the Ethereum address for a secp256k1 public key: keccak-256 of
/// the uncompressed key (tag byte stripped), keeping the last 20 bytes.
pub fn public_key_to_eth_address(pub_key: &PublicKey) -> EthereumAddress {
    let serialized = pub_key.serialize();
    // Skip the first serialized byte; hash the remaining coordinate bytes.
    let digest = keccak_256(&serialized[1..]);
    // The address is the trailing 20 bytes of the 32-byte digest.
    EthereumAddress::from_slice(&digest[12..])
}
/// Builds the signable message for `msg` using the Ethereum personal-sign
/// scheme: keccak-256 of the payload, prefixed with
/// `"\x19Ethereum Signed Message:\n32"`, hashed again.
pub fn prepare_message(msg: &[u8]) -> Message {
    let inner_hash = keccak_256(msg);
    let mut prefixed = b"\x19Ethereum Signed Message:\n32".to_vec();
    prefixed.extend_from_slice(&inner_hash);
    let outer_hash = keccak_256(&prefixed);
    Message::parse_slice(&outer_hash).expect("hash size == 256 bits; qed")
}
| 48.673469 | 103 | 0.760587 |
2359c62fef1baa6d5a585ad6f1789a75c4d55966 | 2,175 | #![no_std]
#![no_main]
use panic_halt as _;
use wio_terminal as wio;
use wio::entry;
use wio::hal::clock::GenericClockController;
use wio::hal::delay::Delay;
use wio::hal::gpio::*;
use wio::hal::hal::spi;
use wio::hal::sercom::*;
use wio::pac::{CorePeripherals, Peripherals};
use wio::prelude::*;
// Writes one byte to the L6470 stepper driver over SPI, framing the
// transfer with the chip-select line (CS low -> byte -> CS high).
// NOTE(review): presumably the driver latches one command byte per CS
// pulse — confirm against the L6470 datasheet.
fn l6470_send(spi: &mut SPIMaster5<Sercom5Pad2<Pb0<PfD>>, Sercom5Pad0<Pb2<PfD>>, Sercom5Pad1<Pb3<PfD>>>,
              b : u8,
              cs: &mut Pb1<Output<PushPull>>) -> () {
    cs.set_low().unwrap();
    spi.write(&[b]).unwrap();
    cs.set_high().unwrap();
}
#[entry]
fn main() -> ! {
    // Initialization: take peripherals and configure the clock tree.
    let mut peripherals = Peripherals::take().unwrap();
    let core = CorePeripherals::take().unwrap();
    let mut clocks = GenericClockController::with_external_32kosc(
        peripherals.GCLK,
        &mut peripherals.MCLK,
        &mut peripherals.OSC32KCTRL,
        &mut peripherals.OSCCTRL,
        &mut peripherals.NVMCTRL,
    );
    let mut delay = Delay::new(core.SYST, &mut clocks);
    let mut pins = wio::Pins::new(peripherals.PORT);
    let gclk0 = &clocks.gclk0();
    // Initialize SPI (SERCOM5, 4 MHz, mode 3) for talking to the L6470.
    let mut spi: SPIMaster5<
        Sercom5Pad2<Pb0<PfD>>,
        Sercom5Pad0<Pb2<PfD>>,
        Sercom5Pad1<Pb3<PfD>>,
    > = SPIMaster5::new(
        &clocks.sercom5_core(&gclk0).unwrap(),
        4.mhz(),
        spi::MODE_3,
        peripherals.SERCOM5,
        &mut peripherals.MCLK,
        (
            pins.spi_miso.into_pad(&mut pins.port),
            pins.spi_mosi.into_pad(&mut pins.port),
            pins.spi_sck.into_pad(&mut pins.port),
        ),
    );
    // Chip-select is driven manually; keep it idle-high.
    let mut cs = pins.spi_cs.into_push_pull_output(&mut pins.port);
    cs.set_high().unwrap();
    // The I2C1 SDA pin is repurposed as the step-clock output line.
    let mut motor_clk = pins.i2c1_sda.into_push_pull_output(&mut pins.port);
    /* Initialize the stepper motor driver.
     */
    // Do nothing for a while (send NOPs)
    l6470_send(&mut spi, 0x00, &mut cs);
    l6470_send(&mut spi, 0x00, &mut cs);
    l6470_send(&mut spi, 0x00, &mut cs);
    l6470_send(&mut spi, 0x00, &mut cs);
    // Go to the HOME position
    l6470_send(&mut spi, 0xC0, &mut cs);
    // Operate in step-clock mode
    l6470_send(&mut spi, 0x59, &mut cs);
    loop {
        // Generate the step clock: toggling every 25 us gives a 50 us
        // period, i.e. a 20 kHz step rate.
        motor_clk.toggle();
        delay.delay_us(25u16);
    }
}
| 27.1875 | 104 | 0.60046 |
fb9629ada6a233f6706216c24a19362e59b8e2d7 | 1,704 | pub mod v2;
use super::Slate;
use crate::wallet::error::ErrorKind;
use std::convert::TryFrom;
use v2::SlateV2;
// Slate format version this crate currently produces.
pub const CURRENT_SLATE_VERSION: u16 = 2;
// NOTE(review): presumably the block-header version associated with this
// slate version — confirm against consensus code.
pub const GRIN_BLOCK_HEADER_VERSION: u16 = 2;
/// Existing versions of the slate
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, PartialOrd, Eq, Ord)]
pub enum SlateVersion {
    /// V2 (most current)
    V2,
}
impl Default for SlateVersion {
    // Derived from CURRENT_SLATE_VERSION so the constant and the enum
    // cannot drift apart; unwrap is safe while the constant stays valid.
    fn default() -> Self {
        SlateVersion::try_from(CURRENT_SLATE_VERSION).unwrap()
    }
}
impl TryFrom<u16> for SlateVersion {
    type Error = ErrorKind;
    /// Maps a wire-format version number onto a known `SlateVersion`;
    /// any other number is reported via `ErrorKind::SlateVersion`.
    fn try_from(value: u16) -> Result<Self, Self::Error> {
        if value == 2 {
            Ok(SlateVersion::V2)
        } else {
            Err(ErrorKind::SlateVersion(value))
        }
    }
}
#[derive(Serialize, Deserialize)]
#[serde(untagged)]
/// Versions are ordered newest to oldest so serde attempts to
/// deserialize newer versions first, then falls back to older versions.
pub enum VersionedSlate {
    /// Current (Grin 1.1.0 - 2.x (current))
    V2(SlateV2),
}
impl VersionedSlate {
    /// Return slate version
    pub fn version(&self) -> SlateVersion {
        match *self {
            VersionedSlate::V2(_) => SlateVersion::V2,
        }
    }
    /// convert this slate type to a specified older version
    pub fn into_version(slate: Slate, version: SlateVersion) -> VersionedSlate {
        match version {
            // Only V2 exists today, so every slate converts into V2.
            SlateVersion::V2 => VersionedSlate::V2(slate.into()),
        }
    }
}
impl From<VersionedSlate> for Slate {
    /// Unwraps a versioned slate into the internal `Slate` type.
    fn from(slate: VersionedSlate) -> Slate {
        match slate {
            // `s` is already a `SlateV2`; the former `SlateV2::from(s)`
            // round-trip was an identity conversion and has been removed.
            VersionedSlate::V2(s) => Slate::from(s),
        }
    }
}
// Borrowing conversion: builds a `Slate` from `&SlateV2` without
// consuming the versioned slate.
// NOTE(review): presumably `Slate::from(&SlateV2)` clones the inner
// data — confirm against that impl.
impl From<&VersionedSlate> for Slate {
    fn from(slate: &VersionedSlate) -> Slate {
        match slate {
            VersionedSlate::V2(s) => Slate::from(s),
        }
    }
}
| 21.846154 | 79 | 0.681338 |
89005b3f8f3b441e462027a602b00c5114193025 | 1,811 | // Copyright 2020 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use common_datavalues::columns::DataColumn;
use common_datavalues::prelude::DataColumnsWithField;
use common_datavalues::DataSchema;
use common_datavalues::DataType;
use common_exception::Result;
use crate::scalars::Function;
#[derive(Clone)]
pub struct VersionFunction {
    // Name this function instance was created under (see `try_create`).
    display_name: String,
}
impl VersionFunction {
    /// Builds a boxed `VersionFunction`, recording the display name it
    /// was referenced under.
    pub fn try_create(display_name: &str) -> Result<Box<dyn Function>> {
        let function = VersionFunction {
            display_name: display_name.to_string(),
        };
        Ok(Box::new(function))
    }
}
impl Function for VersionFunction {
    fn name(&self) -> &str {
        "VersionFunction"
    }
    // version() always produces a String value.
    fn return_type(&self, _args: &[DataType]) -> Result<DataType> {
        Ok(DataType::String)
    }
    fn nullable(&self, _input_schema: &DataSchema) -> Result<bool> {
        Ok(false)
    }
    // Passes the single input column straight through.
    // NOTE(review): presumably the caller supplies the version string as
    // that column — confirm against the call site.
    fn eval(&self, columns: &DataColumnsWithField, _input_rows: usize) -> Result<DataColumn> {
        Ok(columns[0].column().clone())
    }
    fn num_arguments(&self) -> usize {
        1
    }
    // Marked non-deterministic so the result is not constant-folded.
    fn is_deterministic(&self) -> bool {
        false
    }
}
impl fmt::Display for VersionFunction {
    /// User-facing name of the function: always `version`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("version")
    }
}
| 26.246377 | 94 | 0.6709 |
ef062b5b4e632a1717082fe3e5dec2ff66c1159c | 1,698 | #![warn(clippy::pedantic)]
#![allow(clippy::missing_errors_doc)]
pub mod geometry;
pub mod parser;
use crate::geometry::mesh::Mesh;
use crate::parser::options::Options;
use crate::parser::Parser;
use std::fs::File;
use std::io::{self, Read};
use std::path::Path;
#[derive(Debug)]
// Unified error type for this crate: an I/O failure while reading the
// input, or a failure while parsing its contents.
pub enum Error {
    IOError(io::Error),
    ParserError(crate::parser::error::Error),
}
impl std::error::Error for Error {}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Error::IOError(e) => write!(f, "IO Error: {}", e),
Error::ParserError(e) => write!(f, "Parser Error: {}", e),
}
}
}
// Lets `?` lift std I/O errors into this crate's Error.
impl From<io::Error> for Error {
    fn from(e: io::Error) -> Self {
        Error::IOError(e)
    }
}
// Lets `?` lift parser errors into this crate's Error.
impl From<crate::parser::error::Error> for Error {
    fn from(e: crate::parser::error::Error) -> Self {
        Error::ParserError(e)
    }
}
/// Crate-wide result alias; defaults to producing a [`Mesh`].
pub type Result<D = Mesh> = std::result::Result<D, Error>;
/// Types that can be loaded by reading and parsing a file on disk.
pub trait FromPath {
    fn from_path<P: AsRef<Path>>(path: P, options: Options) -> Result;
}
impl FromPath for Mesh {
    /// Reads the file at `path` and parses its contents into a [`Mesh`].
    ///
    /// I/O failures are propagated through the `From<io::Error>` impl via
    /// `?`, replacing the previous explicit `map_err`/`match` plumbing.
    fn from_path<P: AsRef<Path>>(path: P, options: Options) -> Result {
        let mut file = File::open(path)?;
        let mut string = String::new();
        file.read_to_string(&mut string)?;
        Self::parse(&string, options)
    }
}
/// Types that can be parsed directly from in-memory text.
pub trait Parse {
    fn parse(string: &str, options: Options) -> Result;
}
impl Parse for Mesh {
    /// Parses the given text into a [`Mesh`] with a fresh parser.
    fn parse(string: &str, options: Options) -> Result {
        // `string` is already a `&str`; the former `&string` produced a
        // needless `&&str` that only compiled through deref coercion.
        Parser::new(string, options).parse()
    }
}
| 23.583333 | 71 | 0.591284 |
5d640527ef478b241d24f49bd1354ed3fd76a86d | 675 | #![warn(clippy::all)]
#![allow(clippy::pedantic)]
#![forbid(unsafe_code)]
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
pub mod address;
pub mod client;
pub mod coin;
pub mod decimal;
pub mod error;
pub mod mnemonic;
pub mod msg;
pub mod private_key;
pub mod public_key;
pub mod signature;
pub mod utils;
pub use address::Address;
pub use client::Contact;
pub use coin::Coin;
pub use coin::Fee;
pub use mnemonic::Mnemonic;
pub use msg::Msg;
pub use private_key::MessageArgs;
pub use private_key::PrivateKey;
pub use public_key::PublicKey;
pub use signature::Signature;
pub use u64_array_bigints::u256;
pub use u64_array_bigints::U256 as Uint256;
| 19.285714 | 43 | 0.758519 |
9ba842e993334e7a0ef44304bad8e95889ad70ef | 5,812 | use super::texify;
use latex::{Element, Section};
/// A trait which implements useful functions for a moderncv section.
///
/// Fix: the `cvcomputer` prototype previously declared `category` and
/// `programs` twice, which is a compile error (E0415: identifier bound
/// more than once in a parameter list); the parameters are now numbered
/// to match the implementation below.
pub trait CVSection {
    /// Make a typical resume job / education entry.
    fn cventry(
        &mut self,
        years: &str,
        job: &str,
        employer: &str,
        localization: &str,
        grade: Option<&str>,
        comment: Option<&str>,
    ) -> &mut Self;
    /// Make a resume entry to describe language skills.
    fn cvlanguage(&mut self, name: &str, level: &str, comment: &str) -> &mut Self;
    /// Make a resume entry to describe computer skills.
    fn cvcomputer(
        &mut self,
        category1: &str,
        programs1: &str,
        category2: &str,
        programs2: &str,
    ) -> &mut Self;
    /// Make a resume line with a header and a corresponding text (alias of `cvitem`).
    fn cvline(&mut self, leftmark: &str, text: &str) -> &mut Self;
    /// Make a resume line with a header and a corresponding text.
    fn cvitem(&mut self, header: &str, text: &str) -> &mut Self;
    /// Make a resume line with two headers and their corresponding text.
    fn cvdoubleitem(&mut self, header1: &str, text1: &str, header2: &str, text2: &str)
        -> &mut Self;
    /// Make a resume line with a list item.
    fn cvlistitem(&mut self, item: &str) -> &mut Self;
    /// Make a resume line with two list items.
    fn cvlistdoubleitem(&mut self, item1: &str, item2: &str) -> &mut Self;
    /// Make a resume entry with a proficiency comment.
    fn cvitemwithcomment(&mut self, header: &str, text: &str, comment: &str) -> &mut Self;
}
// Each method builds the corresponding LaTeX element via the free
// functions below and appends it to the section, returning `&mut Self`
// so calls can be chained.
impl CVSection for Section {
    /// Make a typical resume job / education entry
    fn cventry(
        &mut self,
        years: &str,
        job: &str,
        employer: &str,
        localization: &str,
        grade: Option<&str>,
        comment: Option<&str>,
    ) -> &mut Self {
        let elem = self::cventry(years, job, employer, localization, grade, comment);
        self.push(elem);
        self
    }
    /// Make a resume entry to describe language skills
    fn cvlanguage(&mut self, name: &str, level: &str, comment: &str) -> &mut Self {
        let elem = self::cvlanguage(name, level, comment);
        self.push(elem);
        self
    }
    /// Make a resume entry to describe computer skills
    fn cvcomputer(
        &mut self,
        category1: &str,
        programs1: &str,
        category2: &str,
        programs2: &str,
    ) -> &mut Self {
        let elem = self::cvcomputer(category1, programs1, category2, programs2);
        self.push(elem);
        self
    }
    /// Make a resume line with a header and a corresponding text (Alias of `cvitem`)
    fn cvline(&mut self, leftmark: &str, text: &str) -> &mut Self {
        let elem = self::cvline(leftmark, text);
        self.push(elem);
        self
    }
    /// Make a resume line with a header and a corresponding text
    fn cvitem(&mut self, header: &str, text: &str) -> &mut Self {
        let elem = self::cvitem(header, text);
        self.push(elem);
        self
    }
    /// Make a resume line with two headers and their corresponding text
    fn cvdoubleitem(
        &mut self,
        header1: &str,
        text1: &str,
        header2: &str,
        text2: &str,
    ) -> &mut Self {
        let elem = self::cvdoubleitem(header1, text1, header2, text2);
        self.push(elem);
        self
    }
    /// Make a resume line with a list item
    fn cvlistitem(&mut self, item: &str) -> &mut Self {
        let elem = self::cvlistitem(item);
        self.push(elem);
        self
    }
    /// Make a resume line with two list items
    fn cvlistdoubleitem(&mut self, item1: &str, item2: &str) -> &mut Self {
        let elem = self::cvlistdoubleitem(item1, item2);
        self.push(elem);
        self
    }
    /// Make a resume entry with a proficiency comment
    fn cvitemwithcomment(&mut self, header: &str, text: &str, comment: &str) -> &mut Self {
        let elem = self::cvitemwithcomment(header, text, comment);
        self.push(elem);
        self
    }
}
/// Make a typical resume job / education entry
pub fn cventry(
    years: &str,
    job: &str,
    employer: &str,
    localization: &str,
    grade: Option<&str>,
    comment: Option<&str>,
) -> Element {
    // Mandatory arguments first; the two optional trailing groups render
    // as empty `{}` groups when absent.
    let mut entry = texify!("cventry", years, job, employer, localization);
    entry.push_str(&format!(r"{{{}}}", grade.unwrap_or("")));
    entry.push_str(&format!(r"{{{}}}", comment.unwrap_or("")));
    Element::UserDefined(entry)
}
/// Make a resume entry to describe language skills
pub fn cvlanguage(name: &str, level: &str, comment: &str) -> Element {
    Element::UserDefined(texify!("cvlanguage", name, level, comment))
}
/// Make a resume entry to describe computer skills
pub fn cvcomputer(category1: &str, programs1: &str, category2: &str, programs2: &str) -> Element {
    Element::UserDefined(texify!("cvcomputer", category1, programs1, category2, programs2))
}
/// Make a resume line with a header and a corresponding text (Alias of `cvitem`)
pub fn cvline(header: &str, text: &str) -> Element {
    Element::UserDefined(texify!("cvline", header, text))
}
/// Make a resume line with a header and a corresponding text
pub fn cvitem(header: &str, text: &str) -> Element {
    Element::UserDefined(texify!("cvitem", header, text))
}
/// Make a resume line with two headers and their corresponding text
pub fn cvdoubleitem(header1: &str, text1: &str, header2: &str, text2: &str) -> Element {
    Element::UserDefined(texify!("cvdoubleitem", header1, text1, header2, text2))
}
/// Make a resume line with a list item
pub fn cvlistitem(item: &str) -> Element {
    Element::UserDefined(texify!("cvlistitem", item))
}
/// Make a resume line with two list items
pub fn cvlistdoubleitem(item1: &str, item2: &str) -> Element {
    Element::UserDefined(texify!("cvlistdoubleitem", item1, item2))
}
/// Make a resume entry with a proficiency comment
pub fn cvitemwithcomment(header: &str, text: &str, comment: &str) -> Element {
    Element::UserDefined(texify!("cvitemwithcomment", header, text, comment))
}
| 29.502538 | 98 | 0.596524 |
33628ba98df04aced75c007fd4ca676266b28162 | 1,377 |
pub struct IconShowChart {
props: crate::Props,
}
impl yew::Component for IconShowChart {
type Properties = crate::Props;
type Message = ();
fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self
{
Self { props }
}
fn update(&mut self, _: Self::Message) -> yew::prelude::ShouldRender
{
true
}
fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender
{
false
}
fn view(&self) -> yew::prelude::Html
{
yew::prelude::html! {
<svg
class=self.props.class.unwrap_or("")
width=self.props.size.unwrap_or(24).to_string()
height=self.props.size.unwrap_or(24).to_string()
viewBox="0 0 24 24"
fill=self.props.fill.unwrap_or("none")
stroke=self.props.color.unwrap_or("currentColor")
stroke-width=self.props.stroke_width.unwrap_or(2).to_string()
stroke-linecap=self.props.stroke_linecap.unwrap_or("round")
stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round")
>
<svg xmlns="http://www.w3.org/2000/svg" height="24" viewBox="0 0 24 24" width="24"><path d="M3.5 18.49l6-6.01 4 4L22 6.92l-1.41-1.41-7.09 7.97-4-4L2 16.99l1.5 1.5z"/></svg>
</svg>
}
}
}
| 29.934783 | 184 | 0.564996 |
d5f2e27a6823e4b12ebe4c530609c9ea7d10e87c | 9,314 | #![allow(clippy::unwrap_used)]
use super::*;
use crate::common::test_utils::{CryptoRegistryKey, CryptoRegistryRecord};
use ic_crypto_internal_basic_sig_ecdsa_secp256r1 as ecdsa_secp256r1;
use ic_crypto_internal_csp::secret_key_store::SecretKeyStore;
use ic_crypto_internal_csp::types::CspSecretKey;
use ic_crypto_internal_csp_test_utils::secret_key_store_test_utils::{
MockSecretKeyStore, TempSecretKeyStore,
};
use ic_interfaces::crypto::DOMAIN_IC_REQUEST;
use ic_protobuf::registry::crypto::v1::AlgorithmId as AlgorithmIdProto;
use ic_protobuf::registry::crypto::v1::PublicKey as PublicKeyProto;
use ic_registry_client::fake::FakeRegistryClient;
use ic_registry_common::proto_registry_data_provider::ProtoRegistryDataProvider;
use ic_registry_keys::make_crypto_node_key;
use ic_test_utilities::registry::MockRegistryClient;
use ic_test_utilities::types::ids::{NODE_1, SUBNET_27};
use ic_types::crypto::{AlgorithmId, KeyId, KeyPurpose};
use ic_types::messages::MessageId;
use ic_types::registry::RegistryClientError;
use ic_types::RegistryVersion;
use openssl::sha::sha256;
// Fixed key ids used by the test fixtures below.
pub const KEY_ID_1: [u8; 32] = [0u8; 32];
pub const KEY_ID_2: [u8; 32] = [1u8; 32];
// We don't use registry version 0 and 1 as they might be used as default
// versions.
pub const REG_V1: RegistryVersion = RegistryVersion::new(2);
pub const REG_V2: RegistryVersion = RegistryVersion::new(3);
pub const KEY_ID: [u8; 32] = KEY_ID_1;
pub const SUBNET_1: SubnetId = SUBNET_27;
pub const SUBNET_ID: SubnetId = SUBNET_1;
/// Builds a registry record holding an Ed25519 node-signing public key
/// for `node_id` at `registry_version`. `_key_id` is accepted for call
///-site symmetry but unused.
pub fn node_signing_record_with(
    node_id: NodeId,
    public_key: Vec<u8>,
    _key_id: KeyId,
    registry_version: RegistryVersion,
) -> CryptoRegistryRecord {
    CryptoRegistryRecord {
        key: CryptoRegistryKey {
            node_id,
            key_purpose: KeyPurpose::NodeSigning,
        },
        value: PublicKeyProto {
            algorithm: AlgorithmIdProto::Ed25519 as i32,
            key_value: public_key,
            version: 0,
            proof_data: None,
        },
        registry_version,
    }
}
/// Builds a registry record holding a multi-BLS12-381 committee-signing
/// public key for `node_id`. `_key_id` is accepted but unused.
pub fn committee_signing_record_with(
    node_id: NodeId,
    public_key: Vec<u8>,
    _key_id: KeyId,
    registry_version: RegistryVersion,
) -> CryptoRegistryRecord {
    CryptoRegistryRecord {
        key: CryptoRegistryKey {
            node_id,
            key_purpose: KeyPurpose::CommitteeSigning,
        },
        value: PublicKeyProto {
            algorithm: AlgorithmIdProto::MultiBls12381 as i32,
            key_value: public_key,
            version: 0,
            proof_data: None,
        },
        registry_version,
    }
}
/// Builds a registry record holding a Groth20 DKG dealing-encryption
/// public key for `node_id`.
pub fn dealing_encryption_pk_record_with(
    node_id: NodeId,
    key_value: Vec<u8>,
    registry_version: RegistryVersion,
) -> CryptoRegistryRecord {
    CryptoRegistryRecord {
        key: CryptoRegistryKey {
            node_id,
            key_purpose: KeyPurpose::DkgDealingEncryption,
        },
        value: PublicKeyProto {
            algorithm: AlgorithmId::Groth20_Bls12_381 as i32,
            key_value,
            version: 0,
            proof_data: None,
        },
        registry_version,
    }
}
#[allow(dead_code)]
/// Returns a temporary secret key store pre-populated with the single
/// (`key_id`, `secret_key`) entry, with no scope.
pub fn secret_key_store_with(key_id: KeyId, secret_key: CspSecretKey) -> impl SecretKeyStore {
    let mut temp_store = TempSecretKeyStore::new();
    let scope = None;
    temp_store.insert(key_id, secret_key, scope).unwrap();
    temp_store
}
/// Converts a test record into the (key, version, proto) triple expected
/// by the registry data provider. Note: the proto is rebuilt with
/// `version: 0` and `proof_data: None` regardless of the record's value.
pub fn to_new_registry_record(
    record: &CryptoRegistryRecord,
) -> (String, RegistryVersion, PublicKeyProto) {
    let key = make_crypto_node_key(record.key.node_id, record.key.key_purpose);
    let pk = PublicKeyProto {
        algorithm: record.value.algorithm as i32,
        key_value: record.value.key_value.clone(),
        version: 0,
        proof_data: None,
    };
    (key, record.registry_version, pk)
}
/// Returns a fake registry client pre-loaded with a single key record.
pub fn registry_with(key_record: CryptoRegistryRecord) -> Arc<dyn RegistryClient> {
    let data_provider = Arc::new(ProtoRegistryDataProvider::new());
    let (key, version, value) = to_new_registry_record(&key_record);
    data_provider
        .add(&key, version, Some(value))
        .expect("Could not extend registry");
    let registry_client = Arc::new(FakeRegistryClient::new(data_provider));
    // Need to poll the data provider at least once to update the cache.
    registry_client.update_to_latest_version();
    registry_client
}
/// Returns a fake registry client pre-loaded with all given key records.
pub fn registry_with_records(key_records: Vec<CryptoRegistryRecord>) -> Arc<dyn RegistryClient> {
    let data_provider = Arc::new(ProtoRegistryDataProvider::new());
    for key_record in key_records {
        let (key, version, value) = to_new_registry_record(&key_record);
        data_provider
            .add(&key, version, Some(value))
            .expect("Could not extend registry");
    }
    let registry_client = Arc::new(FakeRegistryClient::new(data_provider));
    // Need to poll the data provider at least once to update the cache.
    registry_client.update_to_latest_version();
    registry_client
}
/// Returns a mock registry whose lookups all succeed with no value.
pub fn registry_returning_none() -> Arc<dyn RegistryClient> {
    let mut registry = MockRegistryClient::new();
    registry.expect_get_value().return_const(Ok(None));
    registry
        .expect_get_versioned_value()
        .returning(|key, version| {
            Ok(ic_interfaces::registry::RegistryVersionedRecord {
                key: key.to_string(),
                version,
                value: None,
            })
        });
    Arc::new(registry)
}
// TODO(DFN-1397): add exact error checks to the tests that
// expect a specific error.
#[allow(dead_code)]
/// Returns a mock registry whose `get_value` always fails with `error`.
pub fn registry_returning(error: RegistryClientError) -> Arc<dyn RegistryClient> {
    let mut registry = MockRegistryClient::new();
    registry
        .expect_get_value()
        .returning(move |_, _| Err(error.clone()));
    Arc::new(registry)
}
#[allow(dead_code)]
/// Returns a mock secret key store whose `get` always yields `None`.
pub fn secret_key_store_returning_none() -> impl SecretKeyStore {
    let mut sks = MockSecretKeyStore::new();
    sks.expect_get().return_const(None);
    sks
}
/// Returns a mock secret key store that panics if any of its methods is
/// called (each expectation is `.never()`).
pub fn secret_key_store_panicking_on_usage() -> impl SecretKeyStore {
    let mut sks = MockSecretKeyStore::new();
    sks.expect_insert().never();
    sks.expect_get().never();
    sks.expect_contains().never();
    sks.expect_remove().never();
    sks
}
// Self-test: the panicking store must indeed panic on first use.
#[test]
#[should_panic]
pub fn should_panic_when_panicking_secret_key_store_is_used() {
    let sks = secret_key_store_panicking_on_usage();
    let _ = sks.get(&KeyId::from(KEY_ID));
}
// Note: it is not necessary to explicitly set the expectation that the
// various methods of the trait are _never_ called with code like this
// ```
// let mut registry = MockRegistryClient::new();
// registry.expect_get_value().never();
// ```
// because this is the default behavior of mocks created with the mocking
// framework that we use (https://crates.io/crates/mockall)
pub fn registry_panicking_on_usage() -> Arc<dyn RegistryClient> {
    Arc::new(MockRegistryClient::new())
}
// Self-test: the bare mock registry panics on first use.
#[test]
#[should_panic]
fn should_panic_when_panicking_registry_is_used() {
    let registry = registry_panicking_on_usage();
    let key = make_crypto_node_key(NODE_1, KeyPurpose::QueryResponseSigning);
    let _ = registry.get_value(&key, REG_V1);
}
#[allow(dead_code)]
/// Returns an empty (but functional) fake registry client.
pub fn dummy_registry() -> Arc<dyn RegistryClient> {
    Arc::new(FakeRegistryClient::new(Arc::new(
        ProtoRegistryDataProvider::new(),
    )))
}
#[allow(dead_code)]
/// Returns a mock secret key store with no configured expectations.
pub fn dummy_secret_key_store() -> impl SecretKeyStore {
    MockSecretKeyStore::new()
}
/// Generates a fresh keypair for `algorithm_id` (ECDSA-P256 or Ed25519),
/// signs `domain_separator || request_id` with it, and returns the
/// signature together with the public key. ECDSA signs the SHA-256 hash
/// of the message; Ed25519 signs the message directly.
pub fn request_id_signature_and_public_key_with_domain_separator(
    domain_separator: &[u8],
    request_id: &MessageId,
    algorithm_id: AlgorithmId,
) -> (BasicSigOf<MessageId>, UserPublicKey) {
    use ed25519_dalek::Signer;
    let bytes_to_sign = {
        let mut buf = vec![];
        buf.extend_from_slice(domain_separator);
        buf.extend_from_slice(request_id.as_bytes());
        buf
    };
    let (pk_vec, signature_bytes_vec) = {
        match algorithm_id {
            AlgorithmId::EcdsaP256 => {
                let (sk, pk) = ecdsa_secp256r1::new_keypair().unwrap();
                let msg_hash = sha256(&bytes_to_sign);
                (
                    pk.0,
                    ecdsa_secp256r1::api::sign(&msg_hash, &sk)
                        .unwrap()
                        .0
                        .to_vec(),
                )
            }
            AlgorithmId::Ed25519 => {
                let ed25519_keypair = {
                    let mut rng = OsRng::default();
                    ed25519_dalek::Keypair::generate(&mut rng)
                };
                (
                    ed25519_keypair.public.to_bytes().to_vec(),
                    ed25519_keypair.sign(&bytes_to_sign).to_bytes().to_vec(),
                )
            }
            // Only the two algorithms above are supported by this helper.
            _ => panic!["unexpected algorithm id {:?}", algorithm_id],
        }
    };
    let signature: BasicSigOf<MessageId> = BasicSigOf::new(BasicSig(signature_bytes_vec));
    let public_key = UserPublicKey {
        key: pk_vec,
        algorithm_id,
    };
    (signature, public_key)
}
/// Convenience wrapper using the standard IC request domain separator.
pub fn request_id_signature_and_public_key(
    request_id: &MessageId,
    algorithm_id: AlgorithmId,
) -> (BasicSigOf<MessageId>, UserPublicKey) {
    request_id_signature_and_public_key_with_domain_separator(
        DOMAIN_IC_REQUEST,
        request_id,
        algorithm_id,
    )
}
| 33.028369 | 97 | 0.664161 |
38d1a6085a5e2aae5fd39931dd6549adb46b6c14 | 1,018 | use auxiliary_macros::remove_attr;
use pin_project::pin_project;
use std::{marker::PhantomPinned, pin::Pin};
fn is_unpin<T: Unpin>() {} // UI-test helper: compiles only when T: Unpin
#[pin_project]
#[remove_attr(struct_all)] // trailing-comment only: new lines would shift expected stderr line numbers
struct A {
    #[pin] //~ ERROR cannot find attribute `pin` in this scope
    f: PhantomPinned,
}
#[remove_attr(struct_all)]
#[pin_project]
struct B {
    #[pin] //~ ERROR cannot find attribute `pin` in this scope
    f: PhantomPinned,
}
#[pin_project] //~ ERROR has been removed
#[remove_attr(struct_pin)]
struct C {
    f: PhantomPinned,
}
#[remove_attr(struct_pin)]
#[pin_project] // Ok
struct D {
    f: PhantomPinned,
}
fn main() {
    is_unpin::<A>(); //~ ERROR E0277
    is_unpin::<B>(); //~ ERROR E0277
    is_unpin::<D>(); // Ok
    let mut x = A { f: PhantomPinned };
    let _ = Pin::new(&mut x).project(); //~ ERROR E0277,E0599
    let mut x = B { f: PhantomPinned };
    let _ = Pin::new(&mut x).project(); //~ ERROR E0277,E0599
    let mut x = D { f: PhantomPinned };
    let _ = Pin::new(&mut x).project(); //~ Ok
}
| 21.659574 | 62 | 0.621807 |
f5b26de54f5195bcd98176b59f2c5dba4e35b57f | 80 | // compile-pass
#[allow(dead_code)]
// `&*""` reborrows the literal through a deref; this compile-pass test
// checks the result still promotes to a `'static` borrow.
static X: &'static str = &*"";
fn main() {}
| 16 | 30 | 0.5875 |
764bef0d95f0a437e4d4ca95fb7b9c43b04f9211 | 16,295 | #[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
#[doc = "0x00 - Start HFCLK crystal oscillator"]
pub tasks_hfclkstart: TASKS_HFCLKSTART,
#[doc = "0x04 - Stop HFCLK crystal oscillator"]
pub tasks_hfclkstop: TASKS_HFCLKSTOP,
#[doc = "0x08 - Start LFCLK source"]
pub tasks_lfclkstart: TASKS_LFCLKSTART,
#[doc = "0x0c - Stop LFCLK source"]
pub tasks_lfclkstop: TASKS_LFCLKSTOP,
#[doc = "0x10 - Start calibration of LFRC oscillator"]
pub tasks_cal: TASKS_CAL,
#[doc = "0x14 - Start calibration timer"]
pub tasks_ctstart: TASKS_CTSTART,
#[doc = "0x18 - Stop calibration timer"]
pub tasks_ctstop: TASKS_CTSTOP,
_reserved7: [u8; 228usize],
#[doc = "0x100 - HFCLK oscillator started"]
pub events_hfclkstarted: EVENTS_HFCLKSTARTED,
#[doc = "0x104 - LFCLK started"]
pub events_lfclkstarted: EVENTS_LFCLKSTARTED,
_reserved9: [u8; 4usize],
#[doc = "0x10c - Calibration of LFCLK RC oscillator complete event"]
pub events_done: EVENTS_DONE,
#[doc = "0x110 - Calibration timer timeout"]
pub events_ctto: EVENTS_CTTO,
_reserved11: [u8; 496usize],
#[doc = "0x304 - Enable interrupt"]
pub intenset: INTENSET,
#[doc = "0x308 - Disable interrupt"]
pub intenclr: INTENCLR,
_reserved13: [u8; 252usize],
#[doc = "0x408 - Status indicating that HFCLKSTART task has been triggered"]
pub hfclkrun: HFCLKRUN,
#[doc = "0x40c - HFCLK status"]
pub hfclkstat: HFCLKSTAT,
_reserved15: [u8; 4usize],
#[doc = "0x414 - Status indicating that LFCLKSTART task has been triggered"]
pub lfclkrun: LFCLKRUN,
#[doc = "0x418 - LFCLK status"]
pub lfclkstat: LFCLKSTAT,
#[doc = "0x41c - Copy of LFCLKSRC register, set when LFCLKSTART task was triggered"]
pub lfclksrccopy: LFCLKSRCCOPY,
_reserved18: [u8; 248usize],
#[doc = "0x518 - Clock source for the LFCLK"]
pub lfclksrc: LFCLKSRC,
_reserved19: [u8; 28usize],
#[doc = "0x538 - Calibration timer interval"]
pub ctiv: CTIV,
}
#[doc = "Start HFCLK crystal oscillator\n\nThis register you can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [tasks_hfclkstart](tasks_hfclkstart) module"]
pub type TASKS_HFCLKSTART = crate::Reg<u32, _TASKS_HFCLKSTART>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _TASKS_HFCLKSTART;
#[doc = "`write(|w| ..)` method takes [tasks_hfclkstart::W](tasks_hfclkstart::W) writer structure"]
impl crate::Writable for TASKS_HFCLKSTART {}
#[doc = "Start HFCLK crystal oscillator"]
pub mod tasks_hfclkstart;
#[doc = "Stop HFCLK crystal oscillator\n\nThis register you can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [tasks_hfclkstop](tasks_hfclkstop) module"]
pub type TASKS_HFCLKSTOP = crate::Reg<u32, _TASKS_HFCLKSTOP>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _TASKS_HFCLKSTOP;
#[doc = "`write(|w| ..)` method takes [tasks_hfclkstop::W](tasks_hfclkstop::W) writer structure"]
impl crate::Writable for TASKS_HFCLKSTOP {}
#[doc = "Stop HFCLK crystal oscillator"]
pub mod tasks_hfclkstop;
#[doc = "Start LFCLK source\n\nThis register you can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [tasks_lfclkstart](tasks_lfclkstart) module"]
pub type TASKS_LFCLKSTART = crate::Reg<u32, _TASKS_LFCLKSTART>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _TASKS_LFCLKSTART;
#[doc = "`write(|w| ..)` method takes [tasks_lfclkstart::W](tasks_lfclkstart::W) writer structure"]
impl crate::Writable for TASKS_LFCLKSTART {}
#[doc = "Start LFCLK source"]
pub mod tasks_lfclkstart;
#[doc = "Stop LFCLK source\n\nThis register you can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [tasks_lfclkstop](tasks_lfclkstop) module"]
pub type TASKS_LFCLKSTOP = crate::Reg<u32, _TASKS_LFCLKSTOP>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _TASKS_LFCLKSTOP;
#[doc = "`write(|w| ..)` method takes [tasks_lfclkstop::W](tasks_lfclkstop::W) writer structure"]
impl crate::Writable for TASKS_LFCLKSTOP {}
#[doc = "Stop LFCLK source"]
pub mod tasks_lfclkstop;
#[doc = "Start calibration of LFRC oscillator\n\nThis register you can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [tasks_cal](tasks_cal) module"]
pub type TASKS_CAL = crate::Reg<u32, _TASKS_CAL>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _TASKS_CAL;
#[doc = "`write(|w| ..)` method takes [tasks_cal::W](tasks_cal::W) writer structure"]
impl crate::Writable for TASKS_CAL {}
#[doc = "Start calibration of LFRC oscillator"]
pub mod tasks_cal;
#[doc = "Start calibration timer\n\nThis register you can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [tasks_ctstart](tasks_ctstart) module"]
pub type TASKS_CTSTART = crate::Reg<u32, _TASKS_CTSTART>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _TASKS_CTSTART;
#[doc = "`write(|w| ..)` method takes [tasks_ctstart::W](tasks_ctstart::W) writer structure"]
impl crate::Writable for TASKS_CTSTART {}
#[doc = "Start calibration timer"]
pub mod tasks_ctstart;
#[doc = "Stop calibration timer\n\nThis register you can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [tasks_ctstop](tasks_ctstop) module"]
pub type TASKS_CTSTOP = crate::Reg<u32, _TASKS_CTSTOP>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _TASKS_CTSTOP;
#[doc = "`write(|w| ..)` method takes [tasks_ctstop::W](tasks_ctstop::W) writer structure"]
impl crate::Writable for TASKS_CTSTOP {}
#[doc = "Stop calibration timer"]
pub mod tasks_ctstop;
#[doc = "HFCLK oscillator started\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [events_hfclkstarted](events_hfclkstarted) module"]
pub type EVENTS_HFCLKSTARTED = crate::Reg<u32, _EVENTS_HFCLKSTARTED>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _EVENTS_HFCLKSTARTED;
#[doc = "`read()` method returns [events_hfclkstarted::R](events_hfclkstarted::R) reader structure"]
impl crate::Readable for EVENTS_HFCLKSTARTED {}
#[doc = "`write(|w| ..)` method takes [events_hfclkstarted::W](events_hfclkstarted::W) writer structure"]
impl crate::Writable for EVENTS_HFCLKSTARTED {}
#[doc = "HFCLK oscillator started"]
pub mod events_hfclkstarted;
#[doc = "LFCLK started\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [events_lfclkstarted](events_lfclkstarted) module"]
pub type EVENTS_LFCLKSTARTED = crate::Reg<u32, _EVENTS_LFCLKSTARTED>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _EVENTS_LFCLKSTARTED;
#[doc = "`read()` method returns [events_lfclkstarted::R](events_lfclkstarted::R) reader structure"]
impl crate::Readable for EVENTS_LFCLKSTARTED {}
#[doc = "`write(|w| ..)` method takes [events_lfclkstarted::W](events_lfclkstarted::W) writer structure"]
impl crate::Writable for EVENTS_LFCLKSTARTED {}
#[doc = "LFCLK started"]
pub mod events_lfclkstarted;
#[doc = "Calibration of LFCLK RC oscillator complete event\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [events_done](events_done) module"]
pub type EVENTS_DONE = crate::Reg<u32, _EVENTS_DONE>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _EVENTS_DONE;
#[doc = "`read()` method returns [events_done::R](events_done::R) reader structure"]
impl crate::Readable for EVENTS_DONE {}
#[doc = "`write(|w| ..)` method takes [events_done::W](events_done::W) writer structure"]
impl crate::Writable for EVENTS_DONE {}
#[doc = "Calibration of LFCLK RC oscillator complete event"]
pub mod events_done;
#[doc = "Calibration timer timeout\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [events_ctto](events_ctto) module"]
pub type EVENTS_CTTO = crate::Reg<u32, _EVENTS_CTTO>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _EVENTS_CTTO;
#[doc = "`read()` method returns [events_ctto::R](events_ctto::R) reader structure"]
impl crate::Readable for EVENTS_CTTO {}
#[doc = "`write(|w| ..)` method takes [events_ctto::W](events_ctto::W) writer structure"]
impl crate::Writable for EVENTS_CTTO {}
#[doc = "Calibration timer timeout"]
pub mod events_ctto;
#[doc = "Enable interrupt\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [intenset](intenset) module"]
pub type INTENSET = crate::Reg<u32, _INTENSET>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _INTENSET;
#[doc = "`read()` method returns [intenset::R](intenset::R) reader structure"]
impl crate::Readable for INTENSET {}
#[doc = "`write(|w| ..)` method takes [intenset::W](intenset::W) writer structure"]
impl crate::Writable for INTENSET {}
#[doc = "Enable interrupt"]
pub mod intenset;
#[doc = "Disable interrupt\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [intenclr](intenclr) module"]
pub type INTENCLR = crate::Reg<u32, _INTENCLR>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _INTENCLR;
#[doc = "`read()` method returns [intenclr::R](intenclr::R) reader structure"]
impl crate::Readable for INTENCLR {}
#[doc = "`write(|w| ..)` method takes [intenclr::W](intenclr::W) writer structure"]
impl crate::Writable for INTENCLR {}
#[doc = "Disable interrupt"]
pub mod intenclr;
#[doc = "Status indicating that HFCLKSTART task has been triggered\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [hfclkrun](hfclkrun) module"]
pub type HFCLKRUN = crate::Reg<u32, _HFCLKRUN>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _HFCLKRUN;
#[doc = "`read()` method returns [hfclkrun::R](hfclkrun::R) reader structure"]
impl crate::Readable for HFCLKRUN {}
#[doc = "Status indicating that HFCLKSTART task has been triggered"]
pub mod hfclkrun;
#[doc = "HFCLK status\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [hfclkstat](hfclkstat) module"]
pub type HFCLKSTAT = crate::Reg<u32, _HFCLKSTAT>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _HFCLKSTAT;
#[doc = "`read()` method returns [hfclkstat::R](hfclkstat::R) reader structure"]
impl crate::Readable for HFCLKSTAT {}
#[doc = "HFCLK status"]
pub mod hfclkstat;
#[doc = "Status indicating that LFCLKSTART task has been triggered\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [lfclkrun](lfclkrun) module"]
pub type LFCLKRUN = crate::Reg<u32, _LFCLKRUN>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _LFCLKRUN;
#[doc = "`read()` method returns [lfclkrun::R](lfclkrun::R) reader structure"]
impl crate::Readable for LFCLKRUN {}
#[doc = "Status indicating that LFCLKSTART task has been triggered"]
pub mod lfclkrun;
#[doc = "LFCLK status\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [lfclkstat](lfclkstat) module"]
pub type LFCLKSTAT = crate::Reg<u32, _LFCLKSTAT>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _LFCLKSTAT;
#[doc = "`read()` method returns [lfclkstat::R](lfclkstat::R) reader structure"]
impl crate::Readable for LFCLKSTAT {}
#[doc = "LFCLK status"]
pub mod lfclkstat;
#[doc = "Copy of LFCLKSRC register, set when LFCLKSTART task was triggered\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [lfclksrccopy](lfclksrccopy) module"]
pub type LFCLKSRCCOPY = crate::Reg<u32, _LFCLKSRCCOPY>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _LFCLKSRCCOPY;
#[doc = "`read()` method returns [lfclksrccopy::R](lfclksrccopy::R) reader structure"]
impl crate::Readable for LFCLKSRCCOPY {}
#[doc = "Copy of LFCLKSRC register, set when LFCLKSTART task was triggered"]
pub mod lfclksrccopy;
#[doc = "Clock source for the LFCLK\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [lfclksrc](lfclksrc) module"]
pub type LFCLKSRC = crate::Reg<u32, _LFCLKSRC>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _LFCLKSRC;
#[doc = "`read()` method returns [lfclksrc::R](lfclksrc::R) reader structure"]
impl crate::Readable for LFCLKSRC {}
#[doc = "`write(|w| ..)` method takes [lfclksrc::W](lfclksrc::W) writer structure"]
impl crate::Writable for LFCLKSRC {}
#[doc = "Clock source for the LFCLK"]
pub mod lfclksrc;
#[doc = "Calibration timer interval\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [ctiv](ctiv) module"]
pub type CTIV = crate::Reg<u32, _CTIV>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _CTIV;
#[doc = "`read()` method returns [ctiv::R](ctiv::R) reader structure"]
impl crate::Readable for CTIV {}
#[doc = "`write(|w| ..)` method takes [ctiv::W](ctiv::W) writer structure"]
impl crate::Writable for CTIV {}
#[doc = "Calibration timer interval"]
pub mod ctiv;
| 65.705645 | 442 | 0.721694 |
1e75898515c3d7cc2d161ba68a5cf1e0f794775c | 5,137 | use crate::errors::ShellError;
use std::ops::Div;
use std::path::{Path, PathBuf};
pub struct AbsolutePath {
inner: PathBuf,
}
impl AbsolutePath {
pub fn new(path: impl AsRef<Path>) -> AbsolutePath {
let path = path.as_ref();
if path.is_absolute() {
AbsolutePath {
inner: path.to_path_buf(),
}
} else {
panic!("AbsolutePath::new must take an absolute path")
}
}
}
impl Div<&str> for &AbsolutePath {
type Output = AbsolutePath;
fn div(self, rhs: &str) -> Self::Output {
let parts = rhs.split("/");
let mut result = self.inner.clone();
for part in parts {
result = result.join(part);
}
AbsolutePath::new(result)
}
}
impl AsRef<Path> for AbsolutePath {
fn as_ref(&self) -> &Path {
self.inner.as_path()
}
}
pub struct RelativePath {
inner: PathBuf,
}
impl RelativePath {
pub fn new(path: impl Into<PathBuf>) -> RelativePath {
let path = path.into();
if path.is_relative() {
RelativePath { inner: path }
} else {
panic!("RelativePath::new must take a relative path")
}
}
}
impl<T: AsRef<str>> Div<T> for &RelativePath {
type Output = RelativePath;
fn div(self, rhs: T) -> Self::Output {
let parts = rhs.as_ref().split("/");
let mut result = self.inner.clone();
for part in parts {
result = result.join(part);
}
RelativePath::new(result)
}
}
#[derive(Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct Res {
pub loc: PathBuf,
pub at: usize,
}
impl Res {}
pub struct FileStructure {
root: PathBuf,
pub resources: Vec<Res>,
}
impl FileStructure {
pub fn new() -> FileStructure {
FileStructure {
root: PathBuf::new(),
resources: Vec::<Res>::new(),
}
}
pub fn contains_more_than_one_file(&self) -> bool {
self.resources.len() > 1
}
pub fn contains_files(&self) -> bool {
self.resources.len() > 0
}
pub fn set_root(&mut self, path: &Path) {
self.root = path.to_path_buf();
}
pub fn paths_applying_with<F>(&mut self, to: F) -> Result<Vec<(PathBuf, PathBuf)>, Box<dyn std::error::Error>>
where
F: Fn((PathBuf, usize)) -> Result<(PathBuf, PathBuf), Box<dyn std::error::Error>>,
{
self.resources
.iter()
.map(|f| (PathBuf::from(&f.loc), f.at))
.map(|f| to(f))
.collect()
}
pub fn walk_decorate(&mut self, start_path: &Path) -> Result<(), ShellError> {
self.set_root(&dunce::canonicalize(start_path)?);
self.resources = Vec::<Res>::new();
self.build(start_path, 0)?;
self.resources.sort();
Ok(())
}
fn build(&mut self, src: &'a Path, lvl: usize) -> Result<(), ShellError> {
let source = dunce::canonicalize(src)?;
if source.is_dir() {
for entry in std::fs::read_dir(&source)? {
let entry = entry?;
let path = entry.path();
if path.is_dir() {
self.build(&path, lvl + 1)?;
}
self.resources.push(Res {
loc: path.to_path_buf(),
at: lvl,
});
}
} else {
self.resources.push(Res {
loc: source,
at: lvl,
});
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::{FileStructure, Res};
use std::path::PathBuf;
fn fixtures() -> PathBuf {
let mut sdx = PathBuf::new();
sdx.push("tests");
sdx.push("fixtures");
sdx.push("formats");
match dunce::canonicalize(sdx) {
Ok(path) => path,
Err(_) => panic!("Wrong path."),
}
}
#[test]
fn prepares_and_decorates_source_files_for_copying() {
let mut res = FileStructure::new();
res.walk_decorate(fixtures().as_path()).expect("Can not decorate files traversal.");
assert_eq!(
res.resources,
vec![
Res {
loc: fixtures().join("appveyor.yml"),
at: 0
},
Res {
loc: fixtures().join("caco3_plastics.csv"),
at: 0
},
Res {
loc: fixtures().join("cargo_sample.toml"),
at: 0
},
Res {
loc: fixtures().join("jonathan.xml"),
at: 0
},
Res {
loc: fixtures().join("sample.ini"),
at: 0
},
Res {
loc: fixtures().join("sgml_description.json"),
at: 0
},
Res {
loc: fixtures().join("utf16.ini"),
at: 0
}
]
);
}
}
| 23.893023 | 114 | 0.468756 |
e2aca3ef82bbd558387afa34d67faee148de38f9 | 1,773 | #![recursion_limit = "1024"]
extern crate bbrot_lib;
#[macro_use]
extern crate clap;
#[macro_use]
extern crate error_chain;
mod errors {
error_chain!{}
}
use bbrot_lib::Setup;
use clap::{App, Arg};
use errors::*;
quick_main!(run);
fn run() -> Result<()> {
let matches = App::new("Buddhabrot Generator")
.about("Generates a png image of a Buddhabrot")
.version(crate_version!())
.args_from_usage(
"<WIDTH> 'Width of rendering'
<HEIGHT> 'Height of rendering'
<POINTS> 'Minimum number of initial points to iterate'"
)
.arg(
Arg::with_name("PRECISION")
.short("p")
.long("precision")
.default_value("64")
.possible_values(&["32", "64"])
.help("Whether to use 32 or 64 bit floating point numbers"),
)
.arg_from_usage("<OUTPUT> 'File to save output to'")
.get_matches();
let read_usize_arg = |arg| value_t!(matches.value_of(arg), usize).unwrap_or_else(|e| e.exit());
let read_u64_arg = |arg| value_t!(matches.value_of(arg), u64).unwrap_or_else(|e| e.exit());
let width = read_usize_arg("WIDTH");
let height = read_usize_arg("HEIGHT");
let points = read_u64_arg("POINTS");
let output = matches.value_of("OUTPUT").unwrap();
match matches.value_of("PRECISION").unwrap() {
"32" => {
Setup::<f32>::new(width, height, points)
.save_to_png(output)
.chain_err(|| "error writing png")
}
"64" => {
Setup::<f64>::new(width, height, points)
.save_to_png(output)
.chain_err(|| "error writing png")
}
_ => unreachable!(),
}
}
| 29.065574 | 99 | 0.555556 |
50ab7a0ee1d869f3d95aacd87abf74097c68c5c8 | 2,882 | #![allow(clippy::module_inception)]
#![allow(clippy::upper_case_acronyms)]
#![allow(clippy::large_enum_variant)]
#![allow(clippy::wrong_self_convention)]
#![allow(clippy::should_implement_trait)]
#![allow(clippy::blacklisted_name)]
#![allow(clippy::vec_init_then_push)]
#![allow(rustdoc::bare_urls)]
#![warn(missing_docs)]
//! <p>Provides APIs for creating and managing Amazon SageMaker resources. </p>
//! <p>Other Resources:</p>
//! <ul>
//! <li>
//! <p>
//! <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/whatis.html#first-time-user">Amazon SageMaker Developer
//! Guide</a>
//! </p>
//! </li>
//! <li>
//! <p>
//! <a href="https://docs.aws.amazon.com/augmented-ai/2019-11-07/APIReference/Welcome.html">Amazon Augmented AI
//! Runtime API Reference</a>
//! </p>
//! </li>
//! </ul>
//!
//! # Crate Organization
//!
//! The entry point for most customers will be [`Client`]. [`Client`] exposes one method for each API offered
//! by the service.
//!
//! Some APIs require complex or nested arguments. These exist in [`model`].
//!
//! Lastly, errors that can be returned by the service are contained within [`error`]. [`Error`] defines a meta
//! error encompassing all possible errors that can be returned by the service.
//!
//! The other modules within this crate are not required for normal usage.
//!
//! # Examples
//! Examples can be found [here](https://github.com/awslabs/aws-sdk-rust/tree/main/examples/sagemaker).
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use error_meta::Error;
pub use config::Config;
mod aws_endpoint;
/// Client and fluent builders for calling the service.
pub mod client;
/// Configuration for the service.
pub mod config;
/// Errors that can occur when calling the service.
pub mod error;
mod error_meta;
mod idempotency_token;
/// Input structures for operations.
pub mod input;
mod json_deser;
mod json_errors;
mod json_ser;
/// Generated accessors for nested fields
mod lens;
pub mod middleware;
/// Data structures used by operation inputs/outputs.
pub mod model;
mod no_credentials;
/// All operations that this crate can perform.
pub mod operation;
mod operation_deser;
mod operation_ser;
/// Output structures for operations.
pub mod output;
/// Paginators for the service
pub mod paginator;
/// Crate version number.
pub static PKG_VERSION: &str = env!("CARGO_PKG_VERSION");
pub use aws_smithy_http::byte_stream::ByteStream;
pub use aws_smithy_http::result::SdkError;
pub use aws_smithy_types::Blob;
pub use aws_smithy_types::DateTime;
static API_METADATA: aws_http::user_agent::ApiMetadata =
aws_http::user_agent::ApiMetadata::new("sagemaker", PKG_VERSION);
pub use aws_smithy_http::endpoint::Endpoint;
pub use aws_smithy_types::retry::RetryConfig;
pub use aws_types::app_name::AppName;
pub use aws_types::region::Region;
pub use aws_types::Credentials;
pub use client::Client;
| 32.382022 | 116 | 0.735253 |
ed37077b65c43e4f04b25775a83f292b8b48533d | 7,316 | use crate::build_config::BuildConfig;
use crate::error::{err, ok, CompileError, CompileResult};
use crate::parse_tree::Expression;
use crate::parser::Rule;
use crate::span::Span;
use crate::{parse_array_index, Ident};
use pest::iterators::Pair;
#[derive(Debug, Clone)]
pub struct Reassignment<'sc> {
// the thing being reassigned
pub lhs: Box<Expression<'sc>>,
// the expression that is being assigned to the lhs
pub rhs: Expression<'sc>,
pub(crate) span: Span<'sc>,
}
impl<'sc> Reassignment<'sc> {
pub(crate) fn parse_from_pair(
pair: Pair<'sc, Rule>,
config: Option<&BuildConfig>,
) -> CompileResult<'sc, Reassignment<'sc>> {
let path = config.map(|c| c.path());
let span = Span {
span: pair.as_span(),
path: path.clone(),
};
let mut warnings = vec![];
let mut errors = vec![];
let mut iter = pair.into_inner();
let variable_or_struct_reassignment = iter.next().expect("guaranteed by grammar");
match variable_or_struct_reassignment.as_rule() {
Rule::variable_reassignment => {
let mut iter = variable_or_struct_reassignment.into_inner();
let name = check!(
Expression::parse_from_pair_inner(iter.next().unwrap(), config),
return err(warnings, errors),
warnings,
errors
);
let body = iter.next().unwrap();
let body = check!(
Expression::parse_from_pair(body.clone(), config),
Expression::Unit {
span: Span {
span: body.as_span(),
path
}
},
warnings,
errors
);
ok(
Reassignment {
lhs: Box::new(name),
rhs: body,
span,
},
warnings,
errors,
)
}
Rule::struct_field_reassignment => {
let mut iter = variable_or_struct_reassignment.into_inner();
let lhs = iter.next().expect("guaranteed by grammar");
let rhs = iter.next().expect("guaranteed by grammar");
let rhs_span = Span {
span: rhs.as_span(),
path: path.clone(),
};
let body = check!(
Expression::parse_from_pair(rhs, config),
Expression::Unit { span: rhs_span },
warnings,
errors
);
let inner = lhs.into_inner().next().expect("guaranteed by grammar");
assert_eq!(inner.as_rule(), Rule::subfield_path);
// treat parent as one expr, final name as the field to be accessed
// if there are multiple fields, this is a nested expression
// i.e. `a.b.c` is a lookup of field `c` on `a.b` which is a lookup
// of field `b` on `a`
// the first thing is either an exp or a var, everything subsequent must be
// a field
let mut name_parts = inner.into_inner();
let mut expr = check!(
parse_subfield_path_ensure_only_var(
name_parts.next().expect("guaranteed by grammar"),
config
),
return err(warnings, errors),
warnings,
errors
);
for name_part in name_parts {
expr = Expression::SubfieldExpression {
prefix: Box::new(expr.clone()),
span: Span {
span: name_part.as_span(),
path: path.clone(),
},
field_to_access: check!(
Ident::parse_from_pair(name_part, config),
continue,
warnings,
errors
),
}
}
ok(
Reassignment {
lhs: Box::new(expr),
rhs: body,
span,
},
warnings,
errors,
)
}
_ => unreachable!("guaranteed by grammar"),
}
}
}
fn parse_subfield_path_ensure_only_var<'sc>(
item: Pair<'sc, Rule>,
config: Option<&BuildConfig>,
) -> CompileResult<'sc, Expression<'sc>> {
let warnings = vec![];
let mut errors = vec![];
let path = config.map(|c| c.path());
let item = item.into_inner().next().expect("guarenteed by grammar");
match item.as_rule() {
Rule::call_item => parse_call_item_ensure_only_var(item, config),
Rule::array_index => parse_array_index(item, config),
a => {
eprintln!(
"Unimplemented subfield path: {:?} ({:?}) ({:?})",
a,
item.as_str(),
item.as_rule()
);
errors.push(CompileError::UnimplementedRule(
a,
Span {
span: item.as_span(),
path: path.clone(),
},
));
// construct unit expression for error recovery
let exp = Expression::Unit {
span: Span {
span: item.as_span(),
path,
},
};
ok(exp, warnings, errors)
}
}
}
/// Parses a `call_item` rule but ensures that it is only a variable expression, since generic
/// expressions on the LHS of a reassignment are invalid.
/// valid:
/// ```ignore
/// x.y.foo = 5;
/// ```
///
/// invalid:
/// ```ignore
/// (foo()).x = 5;
/// ```
fn parse_call_item_ensure_only_var<'sc>(
item: Pair<'sc, Rule>,
config: Option<&BuildConfig>,
) -> CompileResult<'sc, Expression<'sc>> {
let path = config.map(|c| c.path());
let mut warnings = vec![];
let mut errors = vec![];
assert_eq!(item.as_rule(), Rule::call_item);
let item = item.into_inner().next().expect("guaranteed by grammar");
let exp = match item.as_rule() {
Rule::ident => Expression::VariableExpression {
name: check!(
Ident::parse_from_pair(item.clone(), config),
return err(warnings, errors),
warnings,
errors
),
span: Span {
span: item.as_span(),
path,
},
},
Rule::expr => {
errors.push(CompileError::InvalidExpressionOnLhs {
span: Span {
span: item.as_span(),
path,
},
});
return err(warnings, errors);
}
a => unreachable!("{:?}", a),
};
ok(exp, warnings, errors)
}
| 34.347418 | 94 | 0.45175 |
bbbcdfb83b7583398802bb25fc83ea7634abaa0c | 24,176 | use druid::{
piet::{Text, TextLayout, TextLayoutBuilder},
BoxConstraints, Env, Event, EventCtx, LayoutCtx, LifeCycle, LifeCycleCtx,
PaintCtx, Point, RenderContext, Size, UpdateCtx, Widget, WidgetId,
};
use lapce_data::{
buffer::DiffLines,
config::LapceTheme,
data::LapceTabData,
editor::{LapceEditorBufferData, Syntax},
};
use crate::svg::get_svg;
pub struct LapceEditorGutter {
view_id: WidgetId,
width: f64,
}
impl LapceEditorGutter {
pub fn new(view_id: WidgetId) -> Self {
Self {
view_id,
width: 0.0,
}
}
}
impl Widget<LapceTabData> for LapceEditorGutter {
fn event(
&mut self,
_ctx: &mut EventCtx,
_event: &Event,
_data: &mut LapceTabData,
_env: &Env,
) {
}
fn lifecycle(
&mut self,
_ctx: &mut LifeCycleCtx,
_event: &LifeCycle,
_data: &LapceTabData,
_env: &Env,
) {
}
fn update(
&mut self,
_ctx: &mut UpdateCtx,
_old_data: &LapceTabData,
_data: &LapceTabData,
_env: &Env,
) {
// let old_last_line = old_data.buffer.last_line() + 1;
// let last_line = data.buffer.last_line() + 1;
// if old_last_line.to_string().len() != last_line.to_string().len() {
// ctx.request_layout();
// return;
// }
// if (*old_data.main_split.active == self.view_id
// && *data.main_split.active != self.view_id)
// || (*old_data.main_split.active != self.view_id
// && *data.main_split.active == self.view_id)
// {
// ctx.request_paint();
// }
// if old_data.editor.cursor.current_line(&old_data.buffer)
// != data.editor.cursor.current_line(&data.buffer)
// {
// ctx.request_paint();
// }
// if old_data.current_code_actions().is_some()
// != data.current_code_actions().is_some()
// {
// ctx.request_paint();
// }
}
fn layout(
&mut self,
ctx: &mut LayoutCtx,
bc: &BoxConstraints,
data: &LapceTabData,
_env: &Env,
) -> Size {
let data = data.editor_view_content(self.view_id);
let last_line = data.buffer.last_line() + 1;
let char_width = data.config.editor_text_width(ctx.text(), "W");
self.width = (char_width * last_line.to_string().len() as f64).ceil();
let mut width = self.width + 16.0 + char_width * 2.0;
if data.editor.compare.is_some() {
width += self.width + char_width * 2.0;
}
Size::new(width.ceil(), bc.max().height)
}
fn paint(&mut self, ctx: &mut PaintCtx, data: &LapceTabData, _env: &Env) {
let data = data.editor_view_content(self.view_id);
self.paint_gutter(&data, ctx);
}
}
impl LapceEditorGutter {
fn paint_gutter_inline_diff(
&self,
data: &LapceEditorBufferData,
ctx: &mut PaintCtx,
compare: &str,
) {
if data.buffer.history_changes.get(compare).is_none() {
return;
}
let self_size = ctx.size();
let rect = self_size.to_rect();
let changes = data.buffer.history_changes.get(compare).unwrap();
let line_height = data.config.editor.line_height as f64;
let scroll_offset = data.editor.scroll_offset;
let start_line = (scroll_offset.y / line_height).floor() as usize;
let end_line =
(scroll_offset.y + rect.height() / line_height).ceil() as usize;
let current_line = data.editor.cursor.current_line(&data.buffer);
let last_line = data.buffer.last_line();
let width = data.config.editor_text_width(ctx.text(), "W");
let mut line = 0;
for change in changes.iter() {
match change {
DiffLines::Left(r) => {
let len = r.len();
line += len;
if line < start_line {
continue;
}
ctx.fill(
Size::new(self_size.width, line_height * len as f64)
.to_rect()
.with_origin(Point::new(
0.0,
line_height * (line - len) as f64 - scroll_offset.y,
)),
data.config
.get_color_unchecked(LapceTheme::SOURCE_CONTROL_REMOVED),
);
for l in line - len..line {
if l < start_line {
continue;
}
let actual_line = l - (line - len) + r.start;
let content = actual_line + 1;
let x = ((last_line + 1).to_string().len()
- content.to_string().len())
as f64
* width;
let y = line_height * l as f64 + 5.0 - scroll_offset.y;
let pos = Point::new(x, y);
let text_layout = ctx
.text()
.new_text_layout(
content.to_string()
+ &vec![
" ";
(last_line + 1).to_string().len() + 2
]
.join("")
+ " -",
)
.font(
data.config.editor.font_family(),
data.config.editor.font_size as f64,
)
.text_color(
data.config
.get_color_unchecked(LapceTheme::EDITOR_DIM)
.clone(),
)
.build()
.unwrap();
ctx.draw_text(&text_layout, pos);
if l > end_line {
break;
}
}
}
DiffLines::Both(left, r) => {
let len = r.len();
line += len;
if line < start_line {
continue;
}
for l in line - len..line {
if l < start_line {
continue;
}
let left_actual_line = l - (line - len) + left.start;
let right_actual_line = l - (line - len) + r.start;
let left_content = left_actual_line + 1;
let x = ((last_line + 1).to_string().len()
- left_content.to_string().len())
as f64
* width;
let y = line_height * l as f64 + 5.0 - scroll_offset.y;
let pos = Point::new(x, y);
let text_layout = ctx
.text()
.new_text_layout(left_content.to_string())
.font(
data.config.editor.font_family(),
data.config.editor.font_size as f64,
)
.text_color(
data.config
.get_color_unchecked(LapceTheme::EDITOR_DIM)
.clone(),
)
.build()
.unwrap();
ctx.draw_text(&text_layout, pos);
let right_content = right_actual_line + 1;
let x = ((last_line + 1).to_string().len()
- right_content.to_string().len())
as f64
* width
+ self.width
+ 2.0 * width;
let pos = Point::new(x, y);
let text_layout = ctx
.text()
.new_text_layout(right_content.to_string())
.font(
data.config.editor.font_family(),
data.config.editor.font_size as f64,
)
.text_color(if right_actual_line == current_line {
data.config
.get_color_unchecked(
LapceTheme::EDITOR_FOREGROUND,
)
.clone()
} else {
data.config
.get_color_unchecked(LapceTheme::EDITOR_DIM)
.clone()
})
.build()
.unwrap();
ctx.draw_text(&text_layout, pos);
if l > end_line {
break;
}
}
}
DiffLines::Skip(_l, _r) => {
let rect = Size::new(self_size.width, line_height)
.to_rect()
.with_origin(Point::new(
0.0,
line_height * line as f64 - scroll_offset.y,
));
ctx.fill(
rect,
data.config
.get_color_unchecked(LapceTheme::PANEL_BACKGROUND),
);
ctx.stroke(
rect,
data.config
.get_color_unchecked(LapceTheme::EDITOR_FOREGROUND),
1.0,
);
line += 1;
}
DiffLines::Right(r) => {
let len = r.len();
line += len;
if line < start_line {
continue;
}
ctx.fill(
Size::new(self_size.width, line_height * len as f64)
.to_rect()
.with_origin(Point::new(
0.0,
line_height * (line - len) as f64 - scroll_offset.y,
)),
data.config
.get_color_unchecked(LapceTheme::SOURCE_CONTROL_ADDED),
);
for l in line - len..line {
if l < start_line {
continue;
}
let actual_line = l - (line - len) + r.start;
let content = actual_line + 1;
let x = ((last_line + 1).to_string().len()
- content.to_string().len())
as f64
* width
+ self.width
+ 2.0 * width;
let y = line_height * l as f64 + 5.0 - scroll_offset.y;
let pos = Point::new(x, y);
let text_layout = ctx
.text()
.new_text_layout(content.to_string() + " +")
.font(
data.config.editor.font_family(),
data.config.editor.font_size as f64,
)
.text_color(if actual_line == current_line {
data.config
.get_color_unchecked(
LapceTheme::EDITOR_FOREGROUND,
)
.clone()
} else {
data.config
.get_color_unchecked(LapceTheme::EDITOR_DIM)
.clone()
})
.build()
.unwrap();
ctx.draw_text(&text_layout, pos);
if l > end_line {
break;
}
}
}
}
}
}
fn paint_gutter_code_lens(
&self,
data: &LapceEditorBufferData,
ctx: &mut PaintCtx,
) {
let rect = ctx.size().to_rect();
let scroll_offset = data.editor.scroll_offset;
let empty_lens = Syntax::lens_from_normal_lines(
data.buffer.len(),
data.config.editor.line_height,
data.config.editor.code_lens_font_size,
&[],
);
let lens = if let Some(syntax) = data.buffer.syntax.as_ref() {
&syntax.lens
} else {
&empty_lens
};
let cursor_line = data
.buffer
.line_of_offset(data.editor.cursor.offset().min(data.buffer.len()));
let last_line = data.buffer.line_of_offset(data.buffer.len());
let start_line = lens
.line_of_height(scroll_offset.y.floor() as usize)
.min(last_line);
let end_line = lens
.line_of_height(
(scroll_offset.y + rect.height()).ceil() as usize
+ data.config.editor.line_height,
)
.min(last_line);
let char_width = data
.config
.char_width(ctx.text(), data.config.editor.font_size as f64);
let max_line_width = (last_line + 1).to_string().len() as f64 * char_width;
let mut y = lens.height_of_line(start_line) as f64;
for (line, line_height) in lens.iter_chunks(start_line..end_line + 1) {
let content = if *data.main_split.active != Some(self.view_id)
|| data.editor.cursor.is_insert()
|| line == cursor_line
{
line + 1
} else if line > cursor_line {
line - cursor_line
} else {
cursor_line - line
};
let content = content.to_string();
let is_small = line_height < data.config.editor.line_height;
let text_layout = ctx
.text()
.new_text_layout(content.clone())
.font(
data.config.editor.font_family(),
if is_small {
data.config.editor.code_lens_font_size as f64
} else {
data.config.editor.font_size as f64
},
)
.text_color(if line == cursor_line {
data.config
.get_color_unchecked(LapceTheme::EDITOR_FOREGROUND)
.clone()
} else {
data.config
.get_color_unchecked(LapceTheme::EDITOR_DIM)
.clone()
})
.build()
.unwrap();
let x = max_line_width - text_layout.size().width;
let pos = Point::new(
x,
y - scroll_offset.y
+ if is_small {
0.0
} else {
(line_height as f64 - text_layout.size().height) / 2.0
},
);
ctx.draw_text(&text_layout, pos);
y += line_height as f64;
}
}
    /// Draw the "lightbulb" code-action hint beside the cursor's line.
    ///
    /// Nothing is drawn unless `data.current_code_actions()` yields a
    /// non-empty list. The 16x16 svg is placed just right of the
    /// line-number column (`self.width`), vertically centered within the
    /// cursor's line, compensating for the editor's vertical scroll offset.
    fn paint_code_actions_hint(
        &self,
        data: &LapceEditorBufferData,
        ctx: &mut PaintCtx,
    ) {
        if let Some(actions) = data.current_code_actions() {
            if !actions.is_empty() {
                let line_height = data.config.editor.line_height as f64;
                // Resolve the cursor offset to a (line, column) pair; only
                // the line index matters for vertical placement.
                let offset = data.editor.cursor.offset();
                let (line, _) = data
                    .buffer
                    .offset_to_line_col(offset, data.config.editor.tab_width);
                let svg = get_svg("lightbulb.svg").unwrap();
                let width = 16.0;
                let height = 16.0;
                let char_width = data.config.editor_text_width(ctx.text(), "W");
                let rect =
                    Size::new(width, height).to_rect().with_origin(Point::new(
                        self.width + char_width + 3.0,
                        (line_height - height) / 2.0 + line_height * line as f64
                            - data.editor.scroll_offset.y,
                    ));
                ctx.draw_svg(
                    &svg,
                    rect,
                    Some(data.config.get_color_unchecked(LapceTheme::LAPCE_WARN)),
                );
            }
        }
    }
fn paint_gutter(&self, data: &LapceEditorBufferData, ctx: &mut PaintCtx) {
let rect = ctx.size().to_rect();
ctx.with_save(|ctx| {
let clip_rect = rect;
ctx.clip(clip_rect);
if let Some(compare) = data.editor.compare.as_ref() {
self.paint_gutter_inline_diff(data, ctx, compare);
return;
}
if data.editor.code_lens {
self.paint_gutter_code_lens(data, ctx);
return;
}
let line_height = data.config.editor.line_height as f64;
let scroll_offset = data.editor.scroll_offset;
let start_line = (scroll_offset.y / line_height).floor() as usize;
let end_line =
(scroll_offset.y + rect.height() / line_height).ceil() as usize;
let num_lines = (ctx.size().height / line_height).floor() as usize;
let last_line = data.buffer.last_line();
let current_line = data.editor.cursor.current_line(&data.buffer);
let width = data.config.editor_text_width(ctx.text(), "W");
for line in start_line..start_line + num_lines + 1 {
if line > last_line {
break;
}
let content = if *data.main_split.active != Some(data.view_id)
|| data.editor.cursor.is_insert()
|| line == current_line
{
line + 1
} else if line > current_line {
line - current_line
} else {
current_line - line
};
let content = content.to_string();
let text_layout = ctx
.text()
.new_text_layout(content.clone())
.font(
data.config.editor.font_family(),
data.config.editor.font_size as f64,
)
.text_color(if line == current_line {
data.config
.get_color_unchecked(LapceTheme::EDITOR_FOREGROUND)
.clone()
} else {
data.config
.get_color_unchecked(LapceTheme::EDITOR_DIM)
.clone()
})
.build()
.unwrap();
let x = ((last_line + 1).to_string().len() - content.len()) as f64
* width;
let y = line_height * line as f64 - scroll_offset.y
+ (line_height - text_layout.size().height) / 2.0;
let pos = Point::new(x, y);
ctx.draw_text(&text_layout, pos);
}
if let Some(changes) = data.buffer.history_changes.get("head") {
let mut line = 0;
let mut last_change = None;
for change in changes.iter() {
let len = match change {
DiffLines::Left(_range) => 0,
DiffLines::Skip(_left, right) => right.len(),
DiffLines::Both(_left, right) => right.len(),
DiffLines::Right(range) => range.len(),
};
line += len;
if line < start_line {
last_change = Some(change.clone());
continue;
}
let mut modified = false;
let color = match change {
DiffLines::Left(_range) => {
Some(data.config.get_color_unchecked(
LapceTheme::SOURCE_CONTROL_REMOVED,
))
}
DiffLines::Right(_range) => {
if let Some(DiffLines::Left(_)) = last_change.as_ref() {
modified = true;
}
if modified {
Some(data.config.get_color_unchecked(
LapceTheme::SOURCE_CONTROL_MODIFIED,
))
} else {
Some(data.config.get_color_unchecked(
LapceTheme::SOURCE_CONTROL_ADDED,
))
}
}
_ => None,
};
if let Some(color) = color {
let removed_height = 10.0;
let size = Size::new(
3.0,
if len == 0 {
removed_height
} else {
line_height * len as f64
},
);
let x = self.width + width;
let mut y =
(line - len) as f64 * line_height - scroll_offset.y;
if len == 0 {
y -= removed_height / 2.0;
}
if modified {
let rect = Size::new(3.0, removed_height)
.to_rect()
.with_origin(Point::new(
x,
y - removed_height / 2.0,
));
ctx.fill(
rect,
data.config.get_color_unchecked(
LapceTheme::EDITOR_BACKGROUND,
),
);
}
let rect = size.to_rect().with_origin(Point::new(x, y));
ctx.fill(rect, &color.clone().with_alpha(0.8));
}
if line > end_line {
break;
}
last_change = Some(change.clone());
}
}
if *data.main_split.active == Some(self.view_id) {
self.paint_code_actions_hint(data, ctx);
}
});
}
}
| 38.993548 | 85 | 0.39109 |
7ad136c1da024899c00ceb525d6579ef506a76b1 | 5,192 | use csv::WriterBuilder;
use indexmap::{indexset, IndexSet};
use nu_protocol::{Config, IntoPipelineData, PipelineData, ShellError, Span, Value};
use std::collections::VecDeque;
/// Serialize a nu `Value` into delimiter-separated text (CSV/TSV/...).
///
/// * `Record` → two rows: a header of column names, then the cell values.
/// * `List` → a header built from the union of all record columns
///   (`merge_descriptors`) followed by one row per element; when no element
///   is a record, the list is written as a single row of stringified cells.
/// * anything else → delegated to `to_string_tagged_value`.
///
/// Returns `ShellError::UnsupportedInput` when the writer's buffer cannot
/// be recovered or is not valid UTF-8.
fn from_value_to_delimited_string(
    value: &Value,
    separator: char,
    config: &Config,
    head: Span,
) -> Result<String, ShellError> {
    match value {
        Value::Record { cols, vals, span } => {
            // NOTE(review): `separator as u8` truncates non-ASCII chars;
            // presumably callers only ever pass ASCII separators — confirm.
            let mut wtr = WriterBuilder::new()
                .delimiter(separator as u8)
                .from_writer(vec![]);
            let mut fields: VecDeque<String> = VecDeque::new();
            let mut values: VecDeque<String> = VecDeque::new();
            for (k, v) in cols.iter().zip(vals.iter()) {
                fields.push_back(k.clone());
                values.push_back(to_string_tagged_value(v, config, *span)?);
            }
            wtr.write_record(fields).expect("can not write.");
            wtr.write_record(values).expect("can not write.");
            // Recover the in-memory buffer and re-validate it as UTF-8.
            let v = String::from_utf8(wtr.into_inner().map_err(|_| {
                ShellError::UnsupportedInput("Could not convert record".to_string(), *span)
            })?)
            .map_err(|_| {
                ShellError::UnsupportedInput("Could not convert record".to_string(), *span)
            })?;
            Ok(v)
        }
        Value::List { vals, span } => {
            let mut wtr = WriterBuilder::new()
                .delimiter(separator as u8)
                .from_writer(vec![]);
            let merged_descriptors = merge_descriptors(vals);
            if merged_descriptors.is_empty() {
                // No record columns at all: emit the list as one row,
                // silently replacing unconvertible cells with empty strings.
                wtr.write_record(
                    vals.iter()
                        .map(|ele| {
                            to_string_tagged_value(ele, config, *span)
                                .unwrap_or_else(|_| String::new())
                        })
                        .collect::<Vec<_>>(),
                )
                .expect("can not write");
            } else {
                wtr.write_record(merged_descriptors.iter().map(|item| &item[..]))
                    .expect("can not write.");
                for l in vals {
                    // One row per element; missing columns become empty cells.
                    let mut row = vec![];
                    for desc in &merged_descriptors {
                        row.push(match l.to_owned().get_data_by_key(desc) {
                            Some(s) => to_string_tagged_value(&s, config, *span)?,
                            None => String::new(),
                        });
                    }
                    wtr.write_record(&row).expect("can not write");
                }
            }
            let v = String::from_utf8(wtr.into_inner().map_err(|_| {
                ShellError::UnsupportedInput("Could not convert record".to_string(), *span)
            })?)
            .map_err(|_| {
                ShellError::UnsupportedInput("Could not convert record".to_string(), *span)
            })?;
            Ok(v)
        }
        _ => to_string_tagged_value(value, config, head),
    }
}
/// Render a single `Value` as one delimited-text cell.
///
/// Dates use their `Display` form, `Nothing` becomes an empty cell, the
/// scalar-ish variants go through `into_abbreviated_string`, and any other
/// variant is rejected as `UnsupportedInput` at its own span (falling back
/// to `span` when the value carries none).
fn to_string_tagged_value(v: &Value, config: &Config, span: Span) -> Result<String, ShellError> {
    match v {
        Value::Date { val, .. } => Ok(val.to_string()),
        Value::Nothing { .. } => Ok(String::new()),
        Value::String { .. }
        | Value::Bool { .. }
        | Value::Int { .. }
        | Value::Duration { .. }
        | Value::Binary { .. }
        | Value::CustomValue { .. }
        | Value::Error { .. }
        | Value::Filesize { .. }
        | Value::CellPath { .. }
        | Value::List { .. }
        | Value::Record { .. }
        | Value::Float { .. } => Ok(v.clone().into_abbreviated_string(config)),
        other => Err(ShellError::UnsupportedInput(
            "Unexpected value".to_string(),
            other.span().unwrap_or(span),
        )),
    }
}
/// Collect the union of record column names across `values`, preserving
/// first-seen order.
///
/// A non-record element contributes a single empty-string column, so it
/// still claims a slot in the merged header.
pub fn merge_descriptors(values: &[Value]) -> Vec<String> {
    // IndexSet both deduplicates and remembers insertion order, so the
    // final Vec can be produced directly from it.
    let mut seen: IndexSet<String> = indexset! {};
    for value in values {
        let descriptors = match value {
            Value::Record { cols, .. } => cols.clone(),
            _ => vec!["".to_string()],
        };
        for descriptor in descriptors {
            seen.insert(descriptor);
        }
    }
    seen.into_iter().collect()
}
/// Convert pipeline input to delimited text and wrap it back into a
/// `PipelineData` string value.
///
/// When `noheaders` is set, the first output line (the header row) is
/// stripped. Conversion failures surface as `ShellError::CantConvert`
/// naming `format_name` (e.g. "csv") and the input's type.
pub fn to_delimited_data(
    noheaders: bool,
    sep: char,
    format_name: &'static str,
    input: PipelineData,
    span: Span,
    config: &Config,
) -> Result<PipelineData, ShellError> {
    let value = input.into_value(span);
    let output = match from_value_to_delimited_string(&value, sep, config, span) {
        Ok(mut x) => {
            if noheaders {
                // Drop everything up to and including the first newline —
                // i.e. the header row. Single-line output is left intact.
                if let Some(second_line) = x.find('\n') {
                    let start = second_line + 1;
                    x.replace_range(0..start, "");
                }
            }
            Ok(x)
        }
        Err(_) => Err(ShellError::CantConvert(
            format_name.into(),
            value.get_type().to_string(),
            value.span().unwrap_or(span),
            None,
        )),
    }?;
    Ok(Value::string(output, span).into_pipeline_data())
}
| 34.613333 | 97 | 0.488251 |
abe67746efbc30a5374dc02f1f0ee88251487988 | 110,884 | /*!
# DX11 backend internals.
## Pipeline Layout
In D3D11 there are tables of CBVs, SRVs, UAVs, and samplers.
Each descriptor type can take 1 or two of those entry points.
The descriptor pool is just and array of handles, belonging to descriptor set 1, descriptor set 2, etc.
Each range of descriptors in a descriptor set area of the pool is split into shader stages,
which in turn is split into CBS/SRV/UAV/Sampler parts. That allows binding a descriptor set as a list
of continuous descriptor ranges (per type, per shader stage).
!*/
//#[deny(missing_docs)]
#[macro_use]
extern crate bitflags;
#[macro_use]
extern crate log;
#[macro_use]
extern crate winapi;
use hal::{
adapter,
buffer,
command,
format,
image,
memory,
pass,
pso,
query,
queue,
range::RangeArg,
window,
DrawCount,
IndexCount,
InstanceCount,
Limits,
VertexCount,
VertexOffset,
WorkGroupCount,
};
use range_alloc::RangeAllocator;
use winapi::{
shared::{
dxgi::{IDXGIAdapter, IDXGIFactory, IDXGISwapChain},
dxgiformat,
minwindef::{FALSE, HMODULE, UINT},
windef::{HWND, RECT},
winerror,
},
um::{d3d11, d3dcommon, winuser::GetClientRect},
Interface as _,
};
use wio::com::ComPtr;
use parking_lot::{Condvar, Mutex};
use std::{borrow::Borrow, cell::RefCell, fmt, mem, ops::Range, os::raw::c_void, ptr, sync::Arc};
macro_rules! debug_scope {
($context:expr, $($arg:tt)+) => ({
#[cfg(debug_assertions)]
{
$crate::debug::DebugScope::with_name(
$context,
format_args!($($arg)+),
)
}
#[cfg(not(debug_assertions))]
{
()
}
});
}
macro_rules! debug_marker {
($context:expr, $($arg:tt)+) => ({
#[cfg(debug_assertions)]
{
$crate::debug::debug_marker(
$context,
format_args!($($arg)+),
);
}
});
}
mod conv;
#[cfg(debug_assertions)]
mod debug;
mod device;
mod dxgi;
mod internal;
mod shader;
type CreateFun = unsafe extern "system" fn(
*mut IDXGIAdapter,
UINT,
HMODULE,
UINT,
*const UINT,
UINT,
UINT,
*mut *mut d3d11::ID3D11Device,
*mut UINT,
*mut *mut d3d11::ID3D11DeviceContext,
) -> winerror::HRESULT;
#[derive(Clone)]
pub(crate) struct ViewInfo {
resource: *mut d3d11::ID3D11Resource,
kind: image::Kind,
caps: image::ViewCapabilities,
view_kind: image::ViewKind,
format: dxgiformat::DXGI_FORMAT,
range: image::SubresourceRange,
}
impl fmt::Debug for ViewInfo {
    /// Raw D3D11 resource pointers make a derived `Debug` impractical, so
    /// only the type name is emitted.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "ViewInfo")
    }
}
#[derive(Debug)]
pub struct Instance {
pub(crate) factory: ComPtr<IDXGIFactory>,
pub(crate) dxgi_version: dxgi::DxgiVersion,
library_d3d11: Arc<libloading::Library>,
library_dxgi: libloading::Library,
}
unsafe impl Send for Instance {}
unsafe impl Sync for Instance {}
impl Instance {
    /// Wrap a raw Win32 window handle (`HWND`) in a backend `Surface`.
    ///
    /// No swapchain is created here: `presentation` stays `None` until the
    /// surface is actually configured for presentation.
    pub fn create_surface_from_hwnd(&self, hwnd: *mut c_void) -> Surface {
        Surface {
            factory: self.factory.clone(),
            wnd_handle: hwnd as *mut _,
            presentation: None,
        }
    }
}
/// Report the `hal` features exposed by this backend.
///
/// NOTE(review): the device and feature level are currently ignored — the
/// same fixed feature set is returned for every adapter.
fn get_features(
    _device: ComPtr<d3d11::ID3D11Device>,
    _feature_level: d3dcommon::D3D_FEATURE_LEVEL,
) -> hal::Features {
    hal::Features::ROBUST_BUFFER_ACCESS
        | hal::Features::FULL_DRAW_INDEX_U32
        | hal::Features::FORMAT_BC
        | hal::Features::INSTANCE_RATE
        | hal::Features::SAMPLER_MIP_LOD_BIAS
}
fn get_format_properties(
device: ComPtr<d3d11::ID3D11Device>,
) -> [format::Properties; format::NUM_FORMATS] {
let mut format_properties = [format::Properties::default(); format::NUM_FORMATS];
for (i, props) in &mut format_properties.iter_mut().enumerate().skip(1) {
let format: format::Format = unsafe { mem::transmute(i as u32) };
let dxgi_format = match conv::map_format(format) {
Some(format) => format,
None => continue,
};
let mut support = d3d11::D3D11_FEATURE_DATA_FORMAT_SUPPORT {
InFormat: dxgi_format,
OutFormatSupport: 0,
};
let mut support_2 = d3d11::D3D11_FEATURE_DATA_FORMAT_SUPPORT2 {
InFormat: dxgi_format,
OutFormatSupport2: 0,
};
let hr = unsafe {
device.CheckFeatureSupport(
d3d11::D3D11_FEATURE_FORMAT_SUPPORT,
&mut support as *mut _ as *mut _,
mem::size_of::<d3d11::D3D11_FEATURE_DATA_FORMAT_SUPPORT>() as UINT,
)
};
if hr == winerror::S_OK {
let can_buffer = 0 != support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_BUFFER;
let can_image = 0
!= support.OutFormatSupport
& (d3d11::D3D11_FORMAT_SUPPORT_TEXTURE1D
| d3d11::D3D11_FORMAT_SUPPORT_TEXTURE2D
| d3d11::D3D11_FORMAT_SUPPORT_TEXTURE3D
| d3d11::D3D11_FORMAT_SUPPORT_TEXTURECUBE);
let can_linear = can_image && !format.surface_desc().is_compressed();
if can_image {
props.optimal_tiling |=
format::ImageFeature::SAMPLED | format::ImageFeature::BLIT_SRC;
}
if can_linear {
props.linear_tiling |=
format::ImageFeature::SAMPLED | format::ImageFeature::BLIT_SRC;
}
if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER != 0 {
props.buffer_features |= format::BufferFeature::VERTEX;
}
if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_SHADER_SAMPLE != 0 {
props.optimal_tiling |= format::ImageFeature::SAMPLED_LINEAR;
}
if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_RENDER_TARGET != 0 {
props.optimal_tiling |=
format::ImageFeature::COLOR_ATTACHMENT | format::ImageFeature::BLIT_DST;
if can_linear {
props.linear_tiling |=
format::ImageFeature::COLOR_ATTACHMENT | format::ImageFeature::BLIT_DST;
}
}
if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_BLENDABLE != 0 {
props.optimal_tiling |= format::ImageFeature::COLOR_ATTACHMENT_BLEND;
}
if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_DEPTH_STENCIL != 0 {
props.optimal_tiling |= format::ImageFeature::DEPTH_STENCIL_ATTACHMENT;
}
if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_SHADER_LOAD != 0 {
//TODO: check d3d12::D3D12_FORMAT_SUPPORT2_UAV_TYPED_LOAD ?
if can_buffer {
props.buffer_features |= format::BufferFeature::UNIFORM_TEXEL;
}
}
let hr = unsafe {
device.CheckFeatureSupport(
d3d11::D3D11_FEATURE_FORMAT_SUPPORT2,
&mut support_2 as *mut _ as *mut _,
mem::size_of::<d3d11::D3D11_FEATURE_DATA_FORMAT_SUPPORT2>() as UINT,
)
};
if hr == winerror::S_OK {
if support_2.OutFormatSupport2 & d3d11::D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD != 0 {
//TODO: other atomic flags?
if can_buffer {
props.buffer_features |= format::BufferFeature::STORAGE_TEXEL_ATOMIC;
}
if can_image {
props.optimal_tiling |= format::ImageFeature::STORAGE_ATOMIC;
}
}
if support_2.OutFormatSupport2 & d3d11::D3D11_FORMAT_SUPPORT2_UAV_TYPED_STORE != 0 {
if can_buffer {
props.buffer_features |= format::BufferFeature::STORAGE_TEXEL;
}
if can_image {
props.optimal_tiling |= format::ImageFeature::STORAGE;
}
}
}
}
//TODO: blits, linear tiling
}
format_properties
}
impl hal::Instance<Backend> for Instance {
fn create(_: &str, _: u32) -> Result<Self, hal::UnsupportedBackend> {
// TODO: get the latest factory we can find
match dxgi::get_dxgi_factory() {
Ok((library_dxgi, factory, dxgi_version)) => {
info!("DXGI version: {:?}", dxgi_version);
let library_d3d11 = Arc::new(
libloading::Library::new("d3d11.dll").map_err(|_| hal::UnsupportedBackend)?,
);
Ok(Instance {
factory,
dxgi_version,
library_d3d11,
library_dxgi,
})
}
Err(hr) => {
info!("Failed on factory creation: {:?}", hr);
Err(hal::UnsupportedBackend)
}
}
}
fn enumerate_adapters(&self) -> Vec<adapter::Adapter<Backend>> {
let mut adapters = Vec::new();
let mut idx = 0;
let func: libloading::Symbol<CreateFun> =
match unsafe { self.library_d3d11.get(b"D3D11CreateDevice") } {
Ok(func) => func,
Err(e) => {
error!("Unable to get device creation function: {:?}", e);
return Vec::new();
}
};
while let Ok((adapter, info)) =
dxgi::get_adapter(idx, self.factory.as_raw(), self.dxgi_version)
{
idx += 1;
use hal::memory::Properties;
// TODO: move into function?
let (device, feature_level) = {
let feature_level = get_feature_level(&func, adapter.as_raw());
let mut device = ptr::null_mut();
let hr = unsafe {
func(
adapter.as_raw() as *mut _,
d3dcommon::D3D_DRIVER_TYPE_UNKNOWN,
ptr::null_mut(),
0,
[feature_level].as_ptr(),
1,
d3d11::D3D11_SDK_VERSION,
&mut device as *mut *mut _ as *mut *mut _,
ptr::null_mut(),
ptr::null_mut(),
)
};
if !winerror::SUCCEEDED(hr) {
continue;
}
(
unsafe { ComPtr::<d3d11::ID3D11Device>::from_raw(device) },
feature_level,
)
};
let memory_properties = adapter::MemoryProperties {
memory_types: vec![
adapter::MemoryType {
properties: Properties::DEVICE_LOCAL,
heap_index: 0,
},
adapter::MemoryType {
properties: Properties::CPU_VISIBLE
| Properties::COHERENT
| Properties::CPU_CACHED,
heap_index: 1,
},
adapter::MemoryType {
properties: Properties::CPU_VISIBLE | Properties::CPU_CACHED,
heap_index: 1,
},
],
// TODO: would using *VideoMemory and *SystemMemory from
// DXGI_ADAPTER_DESC be too optimistic? :)
memory_heaps: vec![!0, !0],
};
let limits = hal::Limits {
max_image_1d_size: d3d11::D3D11_REQ_TEXTURE1D_U_DIMENSION as _,
max_image_2d_size: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION as _,
max_image_3d_size: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION as _,
max_image_cube_size: d3d11::D3D11_REQ_TEXTURECUBE_DIMENSION as _,
max_image_array_layers: d3d11::D3D11_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION as _,
max_texel_elements: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION as _, //TODO
max_patch_size: 0, // TODO
max_viewports: d3d11::D3D11_VIEWPORT_AND_SCISSORRECT_OBJECT_COUNT_PER_PIPELINE as _,
max_viewport_dimensions: [d3d11::D3D11_VIEWPORT_BOUNDS_MAX; 2],
max_framebuffer_extent: hal::image::Extent {
//TODO
width: 4096,
height: 4096,
depth: 1,
},
max_compute_work_group_count: [
d3d11::D3D11_CS_THREAD_GROUP_MAX_X,
d3d11::D3D11_CS_THREAD_GROUP_MAX_Y,
d3d11::D3D11_CS_THREAD_GROUP_MAX_Z,
],
max_compute_work_group_size: [
d3d11::D3D11_CS_THREAD_GROUP_MAX_THREADS_PER_GROUP,
1,
1,
], // TODO
max_vertex_input_attribute_offset: 255, // TODO
max_vertex_input_attributes: d3d11::D3D11_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT as _,
max_vertex_input_binding_stride:
d3d11::D3D11_REQ_MULTI_ELEMENT_STRUCTURE_SIZE_IN_BYTES as _,
max_vertex_input_bindings: d3d11::D3D11_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT as _, // TODO: verify same as attributes
max_vertex_output_components: d3d11::D3D11_VS_OUTPUT_REGISTER_COUNT as _, // TODO
min_texel_buffer_offset_alignment: 1, // TODO
min_uniform_buffer_offset_alignment: 16, // TODO: verify
min_storage_buffer_offset_alignment: 1, // TODO
framebuffer_color_sample_counts: 1, // TODO
framebuffer_depth_sample_counts: 1, // TODO
framebuffer_stencil_sample_counts: 1, // TODO
max_color_attachments: d3d11::D3D11_SIMULTANEOUS_RENDER_TARGET_COUNT as _,
buffer_image_granularity: 1,
non_coherent_atom_size: 1, // TODO
max_sampler_anisotropy: 16.,
optimal_buffer_copy_offset_alignment: 1, // TODO
optimal_buffer_copy_pitch_alignment: 1, // TODO
min_vertex_input_binding_stride_alignment: 1,
..hal::Limits::default() //TODO
};
let features = get_features(device.clone(), feature_level);
let format_properties = get_format_properties(device.clone());
let physical_device = PhysicalDevice {
adapter,
library_d3d11: Arc::clone(&self.library_d3d11),
features,
limits,
memory_properties,
format_properties,
};
info!("{:#?}", info);
adapters.push(adapter::Adapter {
info,
physical_device,
queue_families: vec![QueueFamily],
});
}
adapters
}
unsafe fn create_surface(
&self,
has_handle: &impl raw_window_handle::HasRawWindowHandle,
) -> Result<Surface, hal::window::InitError> {
match has_handle.raw_window_handle() {
raw_window_handle::RawWindowHandle::Windows(handle) => {
Ok(self.create_surface_from_hwnd(handle.hwnd))
}
_ => Err(hal::window::InitError::UnsupportedWindowHandle),
}
}
unsafe fn destroy_surface(&self, _surface: Surface) {
// TODO: Implement Surface cleanup
}
}
pub struct PhysicalDevice {
adapter: ComPtr<IDXGIAdapter>,
library_d3d11: Arc<libloading::Library>,
features: hal::Features,
limits: hal::Limits,
memory_properties: adapter::MemoryProperties,
format_properties: [format::Properties; format::NUM_FORMATS],
}
impl fmt::Debug for PhysicalDevice {
    /// COM adapter pointers and library handles have no useful derived
    /// `Debug`, so only the type name is printed.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "PhysicalDevice")
    }
}
unsafe impl Send for PhysicalDevice {}
unsafe impl Sync for PhysicalDevice {}
// TODO: does the adapter we get earlier matter for feature level?
/// Probe the highest D3D feature level the adapter supports by calling
/// `D3D11CreateDevice` without actually keeping a device.
///
/// Requests 11.1 down to 9.1; if the call fails with `E_INVALIDARG` (no
/// 11.1 runtime installed), it retries without the 11.1 entry. Panics via
/// `unimplemented!()` if even the retry fails.
fn get_feature_level(func: &CreateFun, adapter: *mut IDXGIAdapter) -> d3dcommon::D3D_FEATURE_LEVEL {
    let requested_feature_levels = [
        d3dcommon::D3D_FEATURE_LEVEL_11_1,
        d3dcommon::D3D_FEATURE_LEVEL_11_0,
        d3dcommon::D3D_FEATURE_LEVEL_10_1,
        d3dcommon::D3D_FEATURE_LEVEL_10_0,
        d3dcommon::D3D_FEATURE_LEVEL_9_3,
        d3dcommon::D3D_FEATURE_LEVEL_9_2,
        d3dcommon::D3D_FEATURE_LEVEL_9_1,
    ];
    let mut feature_level = d3dcommon::D3D_FEATURE_LEVEL_9_1;
    // Null device/context out-params: we only want the negotiated level.
    let hr = unsafe {
        func(
            adapter,
            d3dcommon::D3D_DRIVER_TYPE_UNKNOWN,
            ptr::null_mut(),
            0,
            requested_feature_levels[..].as_ptr(),
            requested_feature_levels.len() as _,
            d3d11::D3D11_SDK_VERSION,
            ptr::null_mut(),
            &mut feature_level as *mut _,
            ptr::null_mut(),
        )
    };
    if !winerror::SUCCEEDED(hr) {
        // if there is no 11.1 runtime installed, requesting
        // `D3D_FEATURE_LEVEL_11_1` will return E_INVALIDARG so we just retry
        // without that
        if hr == winerror::E_INVALIDARG {
            let hr = unsafe {
                func(
                    adapter,
                    d3dcommon::D3D_DRIVER_TYPE_UNKNOWN,
                    ptr::null_mut(),
                    0,
                    requested_feature_levels[1 ..].as_ptr(),
                    (requested_feature_levels.len() - 1) as _,
                    d3d11::D3D11_SDK_VERSION,
                    ptr::null_mut(),
                    &mut feature_level as *mut _,
                    ptr::null_mut(),
                )
            };
            if !winerror::SUCCEEDED(hr) {
                // TODO: device might not support any feature levels?
                unimplemented!();
            }
        }
    }
    feature_level
}
// TODO: PhysicalDevice
impl adapter::PhysicalDevice<Backend> for PhysicalDevice {
unsafe fn open(
&self,
families: &[(&QueueFamily, &[queue::QueuePriority])],
requested_features: hal::Features,
) -> Result<adapter::Gpu<Backend>, hal::device::CreationError> {
let func: libloading::Symbol<CreateFun> =
self.library_d3d11.get(b"D3D11CreateDevice").unwrap();
let (device, cxt) = {
if !self.features().contains(requested_features) {
return Err(hal::device::CreationError::MissingFeature);
}
let feature_level = get_feature_level(&func, self.adapter.as_raw());
let mut returned_level = d3dcommon::D3D_FEATURE_LEVEL_9_1;
#[cfg(debug_assertions)]
let create_flags = d3d11::D3D11_CREATE_DEVICE_DEBUG;
#[cfg(not(debug_assertions))]
let create_flags = 0;
// TODO: request debug device only on debug config?
let mut device = ptr::null_mut();
let mut cxt = ptr::null_mut();
let hr = func(
self.adapter.as_raw() as *mut _,
d3dcommon::D3D_DRIVER_TYPE_UNKNOWN,
ptr::null_mut(),
create_flags,
[feature_level].as_ptr(),
1,
d3d11::D3D11_SDK_VERSION,
&mut device as *mut *mut _ as *mut *mut _,
&mut returned_level as *mut _,
&mut cxt as *mut *mut _ as *mut *mut _,
);
// NOTE: returns error if adapter argument is non-null and driver
// type is not unknown; or if debug device is requested but not
// present
if !winerror::SUCCEEDED(hr) {
return Err(hal::device::CreationError::InitializationFailed);
}
info!("feature level={:x}", feature_level);
(ComPtr::from_raw(device), ComPtr::from_raw(cxt))
};
let device = device::Device::new(device, cxt, self.memory_properties.clone());
// TODO: deferred context => 1 cxt/queue?
let queue_groups = families
.into_iter()
.map(|&(_family, prio)| {
assert_eq!(prio.len(), 1);
let mut group = queue::QueueGroup::new(queue::QueueFamilyId(0));
// TODO: multiple queues?
let queue = CommandQueue {
context: device.context.clone(),
};
group.add_queue(queue);
group
})
.collect();
Ok(adapter::Gpu {
device,
queue_groups,
})
}
fn format_properties(&self, fmt: Option<format::Format>) -> format::Properties {
let idx = fmt.map(|fmt| fmt as usize).unwrap_or(0);
self.format_properties[idx]
}
fn image_format_properties(
&self,
format: format::Format,
dimensions: u8,
tiling: image::Tiling,
usage: image::Usage,
view_caps: image::ViewCapabilities,
) -> Option<image::FormatProperties> {
conv::map_format(format)?; //filter out unknown formats
let supported_usage = {
use hal::image::Usage as U;
let format_props = &self.format_properties[format as usize];
let props = match tiling {
image::Tiling::Optimal => format_props.optimal_tiling,
image::Tiling::Linear => format_props.linear_tiling,
};
let mut flags = U::empty();
// Note: these checks would have been nicer if we had explicit BLIT usage
if props.contains(format::ImageFeature::BLIT_SRC) {
flags |= U::TRANSFER_SRC;
}
if props.contains(format::ImageFeature::BLIT_DST) {
flags |= U::TRANSFER_DST;
}
if props.contains(format::ImageFeature::SAMPLED) {
flags |= U::SAMPLED;
}
if props.contains(format::ImageFeature::STORAGE) {
flags |= U::STORAGE;
}
if props.contains(format::ImageFeature::COLOR_ATTACHMENT) {
flags |= U::COLOR_ATTACHMENT;
}
if props.contains(format::ImageFeature::DEPTH_STENCIL_ATTACHMENT) {
flags |= U::DEPTH_STENCIL_ATTACHMENT;
}
flags
};
if !supported_usage.contains(usage) {
return None;
}
let max_resource_size =
(d3d11::D3D11_REQ_RESOURCE_SIZE_IN_MEGABYTES_EXPRESSION_A_TERM as usize) << 20;
Some(match tiling {
image::Tiling::Optimal => image::FormatProperties {
max_extent: match dimensions {
1 => image::Extent {
width: d3d11::D3D11_REQ_TEXTURE1D_U_DIMENSION,
height: 1,
depth: 1,
},
2 => image::Extent {
width: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION,
height: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION,
depth: 1,
},
3 => image::Extent {
width: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION,
height: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION,
depth: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION,
},
_ => return None,
},
max_levels: d3d11::D3D11_REQ_MIP_LEVELS as _,
max_layers: match dimensions {
1 => d3d11::D3D11_REQ_TEXTURE1D_ARRAY_AXIS_DIMENSION as _,
2 => d3d11::D3D11_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION as _,
_ => return None,
},
sample_count_mask: if dimensions == 2
&& !view_caps.contains(image::ViewCapabilities::KIND_CUBE)
&& (usage.contains(image::Usage::COLOR_ATTACHMENT)
| usage.contains(image::Usage::DEPTH_STENCIL_ATTACHMENT))
{
0x3F //TODO: use D3D12_FEATURE_DATA_FORMAT_SUPPORT
} else {
0x1
},
max_resource_size,
},
image::Tiling::Linear => image::FormatProperties {
max_extent: match dimensions {
2 => image::Extent {
width: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION,
height: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION,
depth: 1,
},
_ => return None,
},
max_levels: 1,
max_layers: 1,
sample_count_mask: 0x1,
max_resource_size,
},
})
}
    fn memory_properties(&self) -> adapter::MemoryProperties {
        // Heap/type layout was computed once at adapter creation; hand out a copy.
        self.memory_properties.clone()
    }
    fn features(&self) -> hal::Features {
        // Feature set detected during adapter enumeration.
        self.features
    }
    fn limits(&self) -> Limits {
        // Hardware limits captured during adapter enumeration.
        self.limits
    }
}
/// State of an already-configured presentation target: the DXGI swapchain,
/// the render-target view of its backbuffer, and the format/extent it was
/// configured with (compared on reconfigure to decide if a resize is needed).
struct Presentation {
    swapchain: ComPtr<IDXGISwapChain>,
    view: ComPtr<d3d11::ID3D11RenderTargetView>,
    format: format::Format,
    size: window::Extent2D,
}
/// A presentable window surface backed by a DXGI factory and a Win32 window
/// handle. `presentation` is `Some` once a swapchain has been configured.
pub struct Surface {
    pub(crate) factory: ComPtr<IDXGIFactory>,
    wnd_handle: HWND,
    presentation: Option<Presentation>,
}
impl fmt::Debug for Surface {
    /// Opaque debug representation; the underlying COM pointers are not shown.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "Surface")
    }
}
// SAFETY(review): asserts the contained COM/window pointers may be moved and
// shared across threads — TODO confirm against DXGI factory threading rules.
unsafe impl Send for Surface {}
unsafe impl Sync for Surface {}
impl window::Surface<Backend> for Surface {
    fn supports_queue_family(&self, _queue_family: &QueueFamily) -> bool {
        // Only one general-purpose queue family exists, so it always supports
        // presentation.
        true
    }
    fn capabilities(&self, _physical_device: &PhysicalDevice) -> window::SurfaceCapabilities {
        // Query the window's client area to report the current extent.
        let current_extent = unsafe {
            let mut rect: RECT = mem::zeroed();
            assert_ne!(
                0,
                GetClientRect(self.wnd_handle as *mut _, &mut rect as *mut RECT)
            );
            Some(window::Extent2D {
                width: (rect.right - rect.left) as u32,
                height: (rect.bottom - rect.top) as u32,
            })
        };
        // TODO: flip swap effects require dx11.1/windows8
        // NOTE: some swap effects affect msaa capabilities..
        // TODO: _DISCARD swap effects can only have one image?
        window::SurfaceCapabilities {
            present_modes: window::PresentMode::FIFO, //TODO
            composite_alpha_modes: window::CompositeAlphaMode::OPAQUE, //TODO
            image_count: 1 ..= 16, // TODO:
            current_extent,
            extents: window::Extent2D {
                width: 16,
                height: 16,
            } ..= window::Extent2D {
                width: 4096,
                height: 4096,
            },
            max_image_layers: 1,
            usage: image::Usage::COLOR_ATTACHMENT | image::Usage::TRANSFER_SRC,
        }
    }
    fn supported_formats(&self, _physical_device: &PhysicalDevice) -> Option<Vec<format::Format>> {
        // Fixed list of swapchain-capable formats for this backend.
        Some(vec![
            format::Format::Bgra8Srgb,
            format::Format::Bgra8Unorm,
            format::Format::Rgba8Srgb,
            format::Format::Rgba8Unorm,
            format::Format::A2b10g10r10Unorm,
            format::Format::Rgba16Sfloat,
        ])
    }
}
impl window::PresentationSurface<Backend> for Surface {
    type SwapchainImage = ImageView;
    // (Re)builds the swapchain for the requested config, reusing/resizing an
    // existing one when possible, and caches an RTV of backbuffer 0.
    unsafe fn configure_swapchain(
        &mut self,
        device: &device::Device,
        config: window::SwapchainConfig,
    ) -> Result<(), window::CreationError> {
        assert!(image::Usage::COLOR_ATTACHMENT.contains(config.image_usage));
        let swapchain = match self.presentation.take() {
            Some(present) => {
                if present.format == config.format && present.size == config.extent {
                    // Nothing changed; keep the existing swapchain.
                    self.presentation = Some(present);
                    return Ok(());
                }
                let non_srgb_format = conv::map_format_nosrgb(config.format).unwrap();
                // The backbuffer view must be released before ResizeBuffers
                // can succeed.
                drop(present.view);
                let result = present.swapchain.ResizeBuffers(
                    config.image_count,
                    config.extent.width,
                    config.extent.height,
                    non_srgb_format,
                    0,
                );
                if result != winerror::S_OK {
                    error!("ResizeBuffers failed with 0x{:x}", result as u32);
                    return Err(window::CreationError::WindowInUse(hal::device::WindowInUse));
                }
                present.swapchain
            }
            None => {
                let (swapchain, _) =
                    device.create_swapchain_impl(&config, self.wnd_handle, self.factory.clone())?;
                swapchain
            }
        };
        // Fetch backbuffer 0 so we can wrap it in a render-target view.
        let mut resource: *mut d3d11::ID3D11Resource = ptr::null_mut();
        assert_eq!(
            winerror::S_OK,
            swapchain.GetBuffer(
                0 as _,
                &d3d11::ID3D11Resource::uuidof(),
                &mut resource as *mut *mut _ as *mut *mut _,
            )
        );
        let kind = image::Kind::D2(config.extent.width, config.extent.height, 1, 1);
        let format = conv::map_format(config.format).unwrap();
        let decomposed = conv::DecomposedDxgiFormat::from_dxgi_format(format);
        let view_info = ViewInfo {
            resource,
            kind,
            caps: image::ViewCapabilities::empty(),
            view_kind: image::ViewKind::D2,
            format: decomposed.rtv.unwrap(),
            range: image::SubresourceRange {
                aspects: format::Aspects::COLOR,
                levels: 0 .. 1,
                layers: 0 .. 1,
            },
        };
        let view = device.view_image_as_render_target(&view_info).unwrap();
        // The RTV keeps its own reference; release the one GetBuffer added.
        (*resource).Release();
        self.presentation = Some(Presentation {
            swapchain,
            view,
            format: config.format,
            size: config.extent,
        });
        Ok(())
    }
    unsafe fn unconfigure_swapchain(&mut self, _device: &device::Device) {
        // Dropping the presentation releases the swapchain and its view.
        self.presentation = None;
    }
    unsafe fn acquire_image(
        &mut self,
        _timeout_ns: u64, //TODO: use the timeout
    ) -> Result<(ImageView, Option<window::Suboptimal>), window::AcquireError> {
        // Single-backbuffer model: always hand out a view of backbuffer 0.
        let present = self.presentation.as_ref().unwrap();
        let image_view = ImageView {
            format: present.format,
            rtv_handle: Some(present.view.clone()),
            dsv_handle: None,
            srv_handle: None,
            uav_handle: None,
        };
        Ok((image_view, None))
    }
}
/// Thin wrapper over a raw DXGI swapchain (legacy `window::Swapchain` path).
pub struct Swapchain {
    dxgi_swapchain: ComPtr<IDXGISwapChain>,
}
impl fmt::Debug for Swapchain {
    /// Opaque debug representation; the raw DXGI pointer is not shown.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "Swapchain")
    }
}
// SAFETY(review): asserts the DXGI swapchain pointer may be moved and shared
// across threads — TODO confirm against DXGI threading rules.
unsafe impl Send for Swapchain {}
unsafe impl Sync for Swapchain {}
impl window::Swapchain<Backend> for Swapchain {
    unsafe fn acquire_image(
        &mut self,
        _timeout_ns: u64,
        _semaphore: Option<&Semaphore>,
        _fence: Option<&Fence>,
    ) -> Result<(window::SwapImageIndex, Option<window::Suboptimal>), window::AcquireError> {
        // TODO: non-`_DISCARD` swap effects have more than one buffer, `FLIP`
        // effects are dxgi 1.3 (w10+?) in which case there is
        // `GetCurrentBackBufferIndex()` on the swapchain
        // Single-buffer assumption: always return image index 0.
        Ok((0, None))
    }
}
/// The single, general-purpose queue family this backend exposes.
#[derive(Debug, Clone, Copy)]
pub struct QueueFamily;
impl queue::QueueFamily for QueueFamily {
    fn queue_type(&self) -> queue::QueueType {
        // D3D11 has no queue specialization; everything goes through one
        // general queue.
        queue::QueueType::General
    }
    fn max_queues(&self) -> usize {
        // The immediate context is the one and only queue.
        1
    }
    fn id(&self) -> queue::QueueFamilyId {
        queue::QueueFamilyId(0)
    }
}
/// Command queue wrapping the D3D11 immediate device context; deferred
/// command lists are executed on it at submit time.
#[derive(Clone)]
pub struct CommandQueue {
    context: ComPtr<d3d11::ID3D11DeviceContext>,
}
impl fmt::Debug for CommandQueue {
    /// Opaque debug representation; the device context is not shown.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "CommandQueue")
    }
}
// SAFETY(review): asserts the immediate context may be shared across threads.
// D3D11 immediate contexts are not free-threaded — TODO confirm callers
// provide external synchronization.
unsafe impl Send for CommandQueue {}
unsafe impl Sync for CommandQueue {}
impl queue::CommandQueue<Backend> for CommandQueue {
    // Executes recorded command lists on the immediate context, flushing
    // coherent memory before and invalidating it after each list, then
    // signals the (CPU-side) fence.
    unsafe fn submit<'a, T, Ic, S, Iw, Is>(
        &mut self,
        submission: queue::Submission<Ic, Iw, Is>,
        fence: Option<&Fence>,
    ) where
        T: 'a + Borrow<CommandBuffer>,
        Ic: IntoIterator<Item = &'a T>,
        S: 'a + Borrow<Semaphore>,
        Iw: IntoIterator<Item = (&'a S, pso::PipelineStage)>,
        Is: IntoIterator<Item = &'a S>,
    {
        let _scope = debug_scope!(&self.context, "Submit(fence={:?})", fence);
        for cmd_buf in submission.command_buffers {
            let cmd_buf = cmd_buf.borrow();
            let _scope = debug_scope!(
                &self.context,
                "CommandBuffer ({}/{})",
                cmd_buf.flush_coherent_memory.len(),
                cmd_buf.invalidate_coherent_memory.len()
            );
            {
                // Upload host writes to coherent memory before execution.
                let _scope = debug_scope!(&self.context, "Pre-Exec: Flush");
                for sync in &cmd_buf.flush_coherent_memory {
                    sync.do_flush(&self.context);
                }
            }
            self.context
                .ExecuteCommandList(cmd_buf.as_raw_list().as_raw(), FALSE);
            {
                // Read back device writes so the host sees them.
                let _scope = debug_scope!(&self.context, "Post-Exec: Invalidate");
                for sync in &cmd_buf.invalidate_coherent_memory {
                    sync.do_invalidate(&self.context);
                }
            }
        }
        // Fences are emulated on the CPU: flip the flag and wake any waiters.
        if let Some(fence) = fence {
            *fence.mutex.lock() = true;
            fence.condvar.notify_all();
        }
    }
    unsafe fn present<'a, W, Is, S, Iw>(
        &mut self,
        swapchains: Is,
        _wait_semaphores: Iw,
    ) -> Result<Option<window::Suboptimal>, window::PresentError>
    where
        W: 'a + Borrow<Swapchain>,
        Is: IntoIterator<Item = (&'a W, window::SwapImageIndex)>,
        S: 'a + Borrow<Semaphore>,
        Iw: IntoIterator<Item = &'a S>,
    {
        // Present with a sync interval of 1 (vsync); image index is ignored.
        for (swapchain, _idx) in swapchains {
            swapchain.borrow().dxgi_swapchain.Present(1, 0);
        }
        Ok(None)
    }
    unsafe fn present_surface(
        &mut self,
        surface: &mut Surface,
        _image: ImageView,
        _wait_semaphore: Option<&Semaphore>,
    ) -> Result<Option<window::Suboptimal>, window::PresentError> {
        // Requires a configured swapchain; presents with vsync.
        surface
            .presentation
            .as_ref()
            .unwrap()
            .swapchain
            .Present(1, 0);
        Ok(None)
    }
    fn wait_idle(&self) -> Result<(), hal::device::OutOfMemory> {
        // unimplemented!()
        Ok(())
    }
}
/// A pending attachment clear, tagged with the subpass that should execute it
/// and the attachment index it applies to.
#[derive(Debug)]
pub struct AttachmentClear {
    subpass_id: Option<pass::SubpassId>,
    attachment_id: usize,
    raw: command::AttachmentClear,
}
/// Per-renderpass state held by a command buffer between `begin_render_pass`
/// and `end_render_pass`: the pass and framebuffer, pending clears, the
/// render area, and which subpass is currently active.
#[derive(Debug)]
pub struct RenderPassCache {
    pub render_pass: RenderPass,
    pub framebuffer: Framebuffer,
    pub attachment_clear_values: Vec<AttachmentClear>,
    pub target_rect: pso::Rect,
    pub current_subpass: usize,
}
impl RenderPassCache {
    // Performs the clears scheduled for the current subpass and binds its
    // color/depth-stencil attachments as render targets.
    pub fn start_subpass(
        &mut self,
        internal: &mut internal::Internal,
        context: &ComPtr<d3d11::ID3D11DeviceContext>,
        cache: &mut CommandBufferState,
    ) {
        // Only clears recorded for this subpass apply here.
        let attachments = self
            .attachment_clear_values
            .iter()
            .filter(|clear| clear.subpass_id == Some(self.current_subpass))
            .map(|clear| clear.raw);
        // clear_attachments below tramples pipeline/viewport state; mark it
        // dirty so `bind` restores it.
        cache
            .dirty_flag
            .insert(DirtyStateFlag::GRAPHICS_PIPELINE | DirtyStateFlag::VIEWPORTS);
        internal.clear_attachments(
            context,
            attachments,
            &[pso::ClearRect {
                rect: self.target_rect,
                layers: 0 .. 1,
            }],
            &self,
        );
        let subpass = &self.render_pass.subpasses[self.current_subpass];
        let color_views = subpass
            .color_attachments
            .iter()
            .map(|&(id, _)| {
                self.framebuffer.attachments[id]
                    .rtv_handle
                    .clone()
                    .unwrap()
                    .as_raw()
            })
            .collect::<Vec<_>>();
        let ds_view = match subpass.depth_stencil_attachment {
            Some((id, _)) => Some(
                self.framebuffer.attachments[id]
                    .dsv_handle
                    .clone()
                    .unwrap()
                    .as_raw(),
            ),
            None => None,
        };
        cache.set_render_targets(&color_views, ds_view);
        cache.bind(context);
    }
    pub fn next_subpass(&mut self) {
        self.current_subpass += 1;
    }
}
bitflags! {
    /// Tracks which pieces of cached pipeline state have changed since they
    /// were last flushed to the device context (see `CommandBufferState::bind`).
    struct DirtyStateFlag : u32 {
        const RENDER_TARGETS = (1 << 1);
        const VERTEX_BUFFERS = (1 << 2);
        const GRAPHICS_PIPELINE = (1 << 3);
        const VIEWPORTS = (1 << 4);
        const BLEND_STATE = (1 << 5);
    }
}
/// Shadow copy of the pipeline state a command buffer wants bound, flushed
/// lazily to the deferred context via the `bind_*` methods.
pub struct CommandBufferState {
    dirty_flag: DirtyStateFlag,
    // Number of live entries in `render_targets`.
    render_target_len: u32,
    render_targets: [*mut d3d11::ID3D11RenderTargetView; 8],
    depth_target: Option<*mut d3d11::ID3D11DepthStencilView>,
    graphics_pipeline: Option<GraphicsPipeline>,
    // a bitmask that keeps track of what vertex buffer bindings have been "bound" into
    // our vec
    bound_bindings: u32,
    // a bitmask that hold the required binding slots to be bound for the currently
    // bound pipeline
    required_bindings: Option<u32>,
    // the highest binding number in currently bound pipeline
    max_bindings: Option<u32>,
    viewports: Vec<d3d11::D3D11_VIEWPORT>,
    vertex_buffers: Vec<*mut d3d11::ID3D11Buffer>,
    vertex_offsets: Vec<u32>,
    vertex_strides: Vec<u32>,
    blend_factor: Option<[f32; 4]>,
    // we can only support one face (rather, both faces must have the same value)
    stencil_ref: Option<pso::StencilValue>,
    stencil_read_mask: Option<pso::StencilValue>,
    stencil_write_mask: Option<pso::StencilValue>,
    current_blend: Option<*mut d3d11::ID3D11BlendState>,
}
impl fmt::Debug for CommandBufferState {
    /// Opaque debug representation; raw D3D11 pointers are not shown.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "CommandBufferState")
    }
}
impl CommandBufferState {
fn new() -> Self {
CommandBufferState {
dirty_flag: DirtyStateFlag::empty(),
render_target_len: 0,
render_targets: [ptr::null_mut(); 8],
depth_target: None,
graphics_pipeline: None,
bound_bindings: 0,
required_bindings: None,
max_bindings: None,
viewports: Vec::new(),
vertex_buffers: Vec::new(),
vertex_offsets: Vec::new(),
vertex_strides: Vec::new(),
blend_factor: None,
stencil_ref: None,
stencil_read_mask: None,
stencil_write_mask: None,
current_blend: None,
}
}
fn clear(&mut self) {
self.render_target_len = 0;
self.depth_target = None;
self.graphics_pipeline = None;
self.bound_bindings = 0;
self.required_bindings = None;
self.max_bindings = None;
self.viewports.clear();
self.vertex_buffers.clear();
self.vertex_offsets.clear();
self.vertex_strides.clear();
self.blend_factor = None;
self.stencil_ref = None;
self.stencil_read_mask = None;
self.stencil_write_mask = None;
self.current_blend = None;
}
pub fn set_vertex_buffer(
&mut self,
index: usize,
offset: u32,
buffer: *mut d3d11::ID3D11Buffer,
) {
self.bound_bindings |= 1 << index as u32;
if index >= self.vertex_buffers.len() {
self.vertex_buffers.push(buffer);
self.vertex_offsets.push(offset);
} else {
self.vertex_buffers[index] = buffer;
self.vertex_offsets[index] = offset;
}
self.dirty_flag.insert(DirtyStateFlag::VERTEX_BUFFERS);
}
pub fn bind_vertex_buffers(&mut self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
if let Some(binding_count) = self.max_bindings {
if self.vertex_buffers.len() >= binding_count as usize
&& self.vertex_strides.len() >= binding_count as usize
{
unsafe {
context.IASetVertexBuffers(
0,
binding_count,
self.vertex_buffers.as_ptr(),
self.vertex_strides.as_ptr(),
self.vertex_offsets.as_ptr(),
);
}
self.dirty_flag.remove(DirtyStateFlag::VERTEX_BUFFERS);
}
}
}
pub fn set_viewports(&mut self, viewports: &[d3d11::D3D11_VIEWPORT]) {
self.viewports.clear();
self.viewports.extend(viewports);
self.dirty_flag.insert(DirtyStateFlag::VIEWPORTS);
}
pub fn bind_viewports(&mut self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
if let Some(ref pipeline) = self.graphics_pipeline {
if let Some(ref viewport) = pipeline.baked_states.viewport {
unsafe {
context.RSSetViewports(1, [conv::map_viewport(&viewport)].as_ptr());
}
} else {
unsafe {
context.RSSetViewports(self.viewports.len() as u32, self.viewports.as_ptr());
}
}
} else {
unsafe {
context.RSSetViewports(self.viewports.len() as u32, self.viewports.as_ptr());
}
}
self.dirty_flag.remove(DirtyStateFlag::VIEWPORTS);
}
pub fn set_render_targets(
&mut self,
render_targets: &[*mut d3d11::ID3D11RenderTargetView],
depth_target: Option<*mut d3d11::ID3D11DepthStencilView>,
) {
for (idx, &rt) in render_targets.iter().enumerate() {
self.render_targets[idx] = rt;
}
self.render_target_len = render_targets.len() as u32;
self.depth_target = depth_target;
self.dirty_flag.insert(DirtyStateFlag::RENDER_TARGETS);
}
pub fn bind_render_targets(&mut self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
unsafe {
context.OMSetRenderTargets(
self.render_target_len,
self.render_targets.as_ptr(),
if let Some(dsv) = self.depth_target {
dsv
} else {
ptr::null_mut()
},
);
}
self.dirty_flag.remove(DirtyStateFlag::RENDER_TARGETS);
}
pub fn set_blend_factor(&mut self, factor: [f32; 4]) {
self.blend_factor = Some(factor);
self.dirty_flag.insert(DirtyStateFlag::BLEND_STATE);
}
pub fn bind_blend_state(&mut self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
if let Some(blend) = self.current_blend {
let blend_color = if let Some(ref pipeline) = self.graphics_pipeline {
pipeline
.baked_states
.blend_color
.or(self.blend_factor)
.unwrap_or([0f32; 4])
} else {
self.blend_factor.unwrap_or([0f32; 4])
};
// TODO: MSAA
unsafe {
context.OMSetBlendState(blend, &blend_color, !0);
}
self.dirty_flag.remove(DirtyStateFlag::BLEND_STATE);
}
}
pub fn set_graphics_pipeline(&mut self, pipeline: GraphicsPipeline) {
self.graphics_pipeline = Some(pipeline);
self.dirty_flag.insert(DirtyStateFlag::GRAPHICS_PIPELINE);
}
pub fn bind_graphics_pipeline(&mut self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
if let Some(ref pipeline) = self.graphics_pipeline {
self.vertex_strides.clear();
self.vertex_strides.extend(&pipeline.strides);
self.required_bindings = Some(pipeline.required_bindings);
self.max_bindings = Some(pipeline.max_vertex_bindings);
};
self.bind_vertex_buffers(context);
if let Some(ref pipeline) = self.graphics_pipeline {
unsafe {
context.IASetPrimitiveTopology(pipeline.topology);
context.IASetInputLayout(pipeline.input_layout.as_raw());
context.VSSetShader(pipeline.vs.as_raw(), ptr::null_mut(), 0);
if let Some(ref ps) = pipeline.ps {
context.PSSetShader(ps.as_raw(), ptr::null_mut(), 0);
}
if let Some(ref gs) = pipeline.gs {
context.GSSetShader(gs.as_raw(), ptr::null_mut(), 0);
}
if let Some(ref hs) = pipeline.hs {
context.HSSetShader(hs.as_raw(), ptr::null_mut(), 0);
}
if let Some(ref ds) = pipeline.ds {
context.DSSetShader(ds.as_raw(), ptr::null_mut(), 0);
}
context.RSSetState(pipeline.rasterizer_state.as_raw());
if let Some(ref viewport) = pipeline.baked_states.viewport {
context.RSSetViewports(1, [conv::map_viewport(&viewport)].as_ptr());
}
if let Some(ref scissor) = pipeline.baked_states.scissor {
context.RSSetScissorRects(1, [conv::map_rect(&scissor)].as_ptr());
}
if let Some((ref state, reference)) = pipeline.depth_stencil_state {
let stencil_ref = if let pso::State::Static(reference) = reference {
reference
} else {
self.stencil_ref.unwrap_or(0)
};
context.OMSetDepthStencilState(state.as_raw(), stencil_ref);
}
self.current_blend = Some(pipeline.blend_state.as_raw());
}
};
self.bind_blend_state(context);
self.dirty_flag.remove(DirtyStateFlag::GRAPHICS_PIPELINE);
}
pub fn bind(&mut self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
if self.dirty_flag.contains(DirtyStateFlag::RENDER_TARGETS) {
self.bind_render_targets(context);
}
if self.dirty_flag.contains(DirtyStateFlag::GRAPHICS_PIPELINE) {
self.bind_graphics_pipeline(context);
}
if self.dirty_flag.contains(DirtyStateFlag::VERTEX_BUFFERS) {
self.bind_vertex_buffers(context);
}
if self.dirty_flag.contains(DirtyStateFlag::VIEWPORTS) {
self.bind_viewports(context);
}
}
}
/// A command buffer recorded on a D3D11 deferred context; `finish` turns the
/// recorded work into an `ID3D11CommandList` that the queue executes.
pub struct CommandBuffer {
    // TODO: better way of sharing
    internal: internal::Internal,
    context: ComPtr<d3d11::ID3D11DeviceContext>,
    list: RefCell<Option<ComPtr<d3d11::ID3D11CommandList>>>,
    // since coherent memory needs to be synchronized at submission, we need to gather up all
    // coherent resources that are used in the command buffer and flush/invalidate them accordingly
    // before executing.
    flush_coherent_memory: Vec<MemoryFlush>,
    invalidate_coherent_memory: Vec<MemoryInvalidate>,
    // holds information about the active render pass
    render_pass_cache: Option<RenderPassCache>,
    cache: CommandBufferState,
    one_time_submit: bool,
}
impl fmt::Debug for CommandBuffer {
    /// Opaque debug representation; recorded state is not shown.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "CommandBuffer")
    }
}
// SAFETY(review): asserts the deferred context and command list may be moved
// and shared across threads — TODO confirm deferred contexts are only used
// from one thread at a time.
unsafe impl Send for CommandBuffer {}
unsafe impl Sync for CommandBuffer {}
impl CommandBuffer {
    // Creates a command buffer backed by a fresh deferred context.
    fn create_deferred(device: ComPtr<d3d11::ID3D11Device>, internal: internal::Internal) -> Self {
        let mut context: *mut d3d11::ID3D11DeviceContext = ptr::null_mut();
        let hr =
            unsafe { device.CreateDeferredContext(0, &mut context as *mut *mut _ as *mut *mut _) };
        assert_eq!(hr, winerror::S_OK);
        CommandBuffer {
            internal,
            context: unsafe { ComPtr::from_raw(context) },
            list: RefCell::new(None),
            flush_coherent_memory: Vec::new(),
            invalidate_coherent_memory: Vec::new(),
            render_pass_cache: None,
            cache: CommandBufferState::new(),
            one_time_submit: false,
        }
    }
    // Hands the finished command list to the queue. One-time-submit buffers
    // give up their list (it cannot be executed again); otherwise a clone of
    // the COM pointer is returned.
    fn as_raw_list(&self) -> ComPtr<d3d11::ID3D11CommandList> {
        if self.one_time_submit {
            self.list.replace(None).unwrap()
        } else {
            self.list.borrow().clone().unwrap()
        }
    }
    // Schedules a host->device flush of a coherent buffer at submit time
    // (deduplicated by the underlying resource pointer).
    fn defer_coherent_flush(&mut self, buffer: &Buffer) {
        if !self
            .flush_coherent_memory
            .iter()
            .any(|m| m.buffer == buffer.internal.raw)
        {
            self.flush_coherent_memory.push(MemoryFlush {
                host_memory: buffer.host_ptr,
                sync_range: SyncRange::Whole,
                buffer: buffer.internal.raw,
            });
        }
    }
    // Schedules a device->host readback of a coherent buffer after execution
    // (deduplicated by the underlying resource pointer).
    fn defer_coherent_invalidate(&mut self, buffer: &Buffer) {
        if !self
            .invalidate_coherent_memory
            .iter()
            .any(|m| m.buffer == buffer.internal.raw)
        {
            self.invalidate_coherent_memory.push(MemoryInvalidate {
                working_buffer: Some(self.internal.working_buffer.clone()),
                working_buffer_size: self.internal.working_buffer_size,
                host_memory: buffer.host_ptr,
                sync_range: buffer.bound_range.clone(),
                buffer: buffer.internal.raw,
            });
        }
    }
    // Clears all recorded synchronization and cached state.
    fn reset(&mut self) {
        self.flush_coherent_memory.clear();
        self.invalidate_coherent_memory.clear();
        self.render_pass_cache = None;
        self.cache.clear();
    }
}
impl command::CommandBuffer<Backend> for CommandBuffer {
    // Starts recording; remembers whether the buffer is one-time-submit so
    // `as_raw_list` knows whether to give up or clone the command list.
    unsafe fn begin(
        &mut self,
        flags: command::CommandBufferFlags,
        _info: command::CommandBufferInheritanceInfo<Backend>,
    ) {
        self.one_time_submit = flags.contains(command::CommandBufferFlags::ONE_TIME_SUBMIT);
        self.reset();
    }
    // Closes recording on the deferred context and stores the resulting
    // command list for later execution by the queue.
    unsafe fn finish(&mut self) {
        let mut list = ptr::null_mut();
        let hr = self
            .context
            .FinishCommandList(FALSE, &mut list as *mut *mut _ as *mut *mut _);
        assert_eq!(hr, winerror::S_OK);
        self.list.replace(Some(ComPtr::from_raw(list)));
    }
    unsafe fn reset(&mut self, _release_resources: bool) {
        // Delegates to the inherent reset; resources are not explicitly freed.
        self.reset();
    }
    // Translates the render pass' load-op clears into per-subpass
    // `AttachmentClear`s, caches the pass state, and starts subpass 0.
    unsafe fn begin_render_pass<T>(
        &mut self,
        render_pass: &RenderPass,
        framebuffer: &Framebuffer,
        target_rect: pso::Rect,
        clear_values: T,
        _first_subpass: command::SubpassContents,
    ) where
        T: IntoIterator,
        T::Item: Borrow<command::ClearValue>,
    {
        use pass::AttachmentLoadOp as Alo;
        let mut clear_iter = clear_values.into_iter();
        let mut attachment_clears = Vec::new();
        for (idx, attachment) in render_pass.attachments.iter().enumerate() {
            //let attachment = render_pass.attachments[attachment_ref];
            let format = attachment.format.unwrap();
            // Clear in the first subpass that uses this attachment.
            let subpass_id = render_pass.subpasses.iter().position(|sp| sp.is_using(idx));
            if attachment.has_clears() {
                // Clear values line up with attachments that request clears.
                let value = *clear_iter.next().unwrap().borrow();
                match (attachment.ops.load, attachment.stencil_ops.load) {
                    (Alo::Clear, Alo::Clear) if format.is_depth() => {
                        attachment_clears.push(AttachmentClear {
                            subpass_id,
                            attachment_id: idx,
                            raw: command::AttachmentClear::DepthStencil {
                                depth: Some(value.depth_stencil.depth),
                                stencil: Some(value.depth_stencil.stencil),
                            },
                        });
                    }
                    (Alo::Clear, Alo::Clear) => {
                        // NOTE(review): also records a stencil clear for a
                        // color attachment — presumably harmless; verify.
                        attachment_clears.push(AttachmentClear {
                            subpass_id,
                            attachment_id: idx,
                            raw: command::AttachmentClear::Color {
                                index: idx,
                                value: value.color,
                            },
                        });
                        attachment_clears.push(AttachmentClear {
                            subpass_id,
                            attachment_id: idx,
                            raw: command::AttachmentClear::DepthStencil {
                                depth: None,
                                stencil: Some(value.depth_stencil.stencil),
                            },
                        });
                    }
                    (Alo::Clear, _) if format.is_depth() => {
                        attachment_clears.push(AttachmentClear {
                            subpass_id,
                            attachment_id: idx,
                            raw: command::AttachmentClear::DepthStencil {
                                depth: Some(value.depth_stencil.depth),
                                stencil: None,
                            },
                        });
                    }
                    (Alo::Clear, _) => {
                        attachment_clears.push(AttachmentClear {
                            subpass_id,
                            attachment_id: idx,
                            raw: command::AttachmentClear::Color {
                                index: idx,
                                value: value.color,
                            },
                        });
                    }
                    (_, Alo::Clear) => {
                        attachment_clears.push(AttachmentClear {
                            subpass_id,
                            attachment_id: idx,
                            raw: command::AttachmentClear::DepthStencil {
                                depth: None,
                                stencil: Some(value.depth_stencil.stencil),
                            },
                        });
                    }
                    _ => {}
                }
            }
        }
        self.render_pass_cache = Some(RenderPassCache {
            render_pass: render_pass.clone(),
            framebuffer: framebuffer.clone(),
            attachment_clear_values: attachment_clears,
            target_rect,
            current_subpass: 0,
        });
        if let Some(ref mut current_render_pass) = self.render_pass_cache {
            current_render_pass.start_subpass(&mut self.internal, &self.context, &mut self.cache);
        }
    }
    unsafe fn next_subpass(&mut self, _contents: command::SubpassContents) {
        if let Some(ref mut current_render_pass) = self.render_pass_cache {
            // TODO: resolve msaa
            current_render_pass.next_subpass();
            current_render_pass.start_subpass(&mut self.internal, &self.context, &mut self.cache);
        }
    }
    unsafe fn end_render_pass(&mut self) {
        // Unbind all render targets and drop the cached pass state.
        self.context
            .OMSetRenderTargets(8, [ptr::null_mut(); 8].as_ptr(), ptr::null_mut());
        self.render_pass_cache = None;
    }
    // No-op: D3D11 tracks hazards itself, so explicit barriers are unneeded.
    unsafe fn pipeline_barrier<'a, T>(
        &mut self,
        _stages: Range<pso::PipelineStage>,
        _dependencies: memory::Dependencies,
        _barriers: T,
    ) where
        T: IntoIterator,
        T::Item: Borrow<memory::Barrier<'a, Backend>>,
    {
        // TODO: should we track and assert on resource states?
        // unimplemented!()
    }
    // Clears each requested mip level / array layer of the image through its
    // cached per-subresource RTVs/DSVs.
    unsafe fn clear_image<T>(
        &mut self,
        image: &Image,
        _: image::Layout,
        value: command::ClearValue,
        subresource_ranges: T,
    ) where
        T: IntoIterator,
        T::Item: Borrow<image::SubresourceRange>,
    {
        for range in subresource_ranges {
            let range = range.borrow();
            // TODO: clear Int/Uint depending on format
            if range.aspects.contains(format::Aspects::COLOR) {
                for layer in range.layers.clone() {
                    for level in range.levels.clone() {
                        self.context.ClearRenderTargetView(
                            image.get_rtv(level, layer).unwrap().as_raw(),
                            &value.color.float32,
                        );
                    }
                }
            }
            // Depth and stencil can be cleared in one call via flag bits.
            let mut depth_stencil_flags = 0;
            if range.aspects.contains(format::Aspects::DEPTH) {
                depth_stencil_flags |= d3d11::D3D11_CLEAR_DEPTH;
            }
            if range.aspects.contains(format::Aspects::STENCIL) {
                depth_stencil_flags |= d3d11::D3D11_CLEAR_STENCIL;
            }
            if depth_stencil_flags != 0 {
                for layer in range.layers.clone() {
                    for level in range.levels.clone() {
                        self.context.ClearDepthStencilView(
                            image.get_dsv(level, layer).unwrap().as_raw(),
                            depth_stencil_flags,
                            value.depth_stencil.depth,
                            value.depth_stencil.stencil as _,
                        );
                    }
                }
            }
        }
    }
    // Clears attachments of the active render pass via internal draw-based
    // clears; must be called inside a render pass.
    unsafe fn clear_attachments<T, U>(&mut self, clears: T, rects: U)
    where
        T: IntoIterator,
        T::Item: Borrow<command::AttachmentClear>,
        U: IntoIterator,
        U::Item: Borrow<pso::ClearRect>,
    {
        if let Some(ref pass) = self.render_pass_cache {
            // The internal clear path clobbers pipeline, viewport and RT
            // state; mark them dirty and rebind afterwards.
            self.cache.dirty_flag.insert(
                DirtyStateFlag::GRAPHICS_PIPELINE
                    | DirtyStateFlag::VIEWPORTS
                    | DirtyStateFlag::RENDER_TARGETS,
            );
            self.internal
                .clear_attachments(&self.context, clears, rects, pass);
            self.cache.bind(&self.context);
        } else {
            panic!("`clear_attachments` can only be called inside a renderpass")
        }
    }
    // MSAA resolve is not implemented for this backend yet.
    unsafe fn resolve_image<T>(
        &mut self,
        _src: &Image,
        _src_layout: image::Layout,
        _dst: &Image,
        _dst_layout: image::Layout,
        _regions: T,
    ) where
        T: IntoIterator,
        T::Item: Borrow<command::ImageResolve>,
    {
        unimplemented!()
    }
    // Performs a 2D blit through an internal draw-based path, which clobbers
    // the bound graphics pipeline (marked dirty and rebound afterwards).
    unsafe fn blit_image<T>(
        &mut self,
        src: &Image,
        _src_layout: image::Layout,
        dst: &Image,
        _dst_layout: image::Layout,
        filter: image::Filter,
        regions: T,
    ) where
        T: IntoIterator,
        T::Item: Borrow<command::ImageBlit>,
    {
        self.cache
            .dirty_flag
            .insert(DirtyStateFlag::GRAPHICS_PIPELINE);
        self.internal
            .blit_2d_image(&self.context, src, dst, filter, regions);
        self.cache.bind(&self.context);
    }
    unsafe fn bind_index_buffer(&mut self, ibv: buffer::IndexBufferView<Backend>) {
        // Index buffers are bound directly; no caching like vertex buffers.
        self.context.IASetIndexBuffer(
            ibv.buffer.internal.raw,
            conv::map_index_type(ibv.index_type),
            ibv.offset as u32,
        );
    }
    // Records vertex buffers in the state cache (strides come from the
    // pipeline), scheduling coherent-memory flushes as needed, then attempts
    // to flush the bindings.
    unsafe fn bind_vertex_buffers<I, T>(&mut self, first_binding: pso::BufferIndex, buffers: I)
    where
        I: IntoIterator<Item = (T, buffer::Offset)>,
        T: Borrow<Buffer>,
    {
        for (i, (buf, offset)) in buffers.into_iter().enumerate() {
            let idx = i + first_binding as usize;
            let buf = buf.borrow();
            if buf.properties.contains(memory::Properties::COHERENT) {
                self.defer_coherent_flush(buf);
            }
            self.cache
                .set_vertex_buffer(idx, offset as u32, buf.internal.raw);
        }
        self.cache.bind_vertex_buffers(&self.context);
    }
    unsafe fn set_viewports<T>(&mut self, _first_viewport: u32, viewports: T)
    where
        T: IntoIterator,
        T::Item: Borrow<pso::Viewport>,
    {
        let viewports = viewports
            .into_iter()
            .map(|v| {
                let v = v.borrow();
                conv::map_viewport(v)
            })
            .collect::<Vec<_>>();
        // TODO: DX only lets us set all VPs at once, so cache in slice?
        self.cache.set_viewports(&viewports);
        self.cache.bind_viewports(&self.context);
    }
    unsafe fn set_scissors<T>(&mut self, _first_scissor: u32, scissors: T)
    where
        T: IntoIterator,
        T::Item: Borrow<pso::Rect>,
    {
        let scissors = scissors
            .into_iter()
            .map(|s| {
                let s = s.borrow();
                conv::map_rect(s)
            })
            .collect::<Vec<_>>();
        // TODO: same as for viewports
        self.context
            .RSSetScissorRects(scissors.len() as _, scissors.as_ptr());
    }
    unsafe fn set_blend_constants(&mut self, color: pso::ColorValue) {
        // Cache the dynamic blend factor and flush it if possible.
        self.cache.set_blend_factor(color);
        self.cache.bind_blend_state(&self.context);
    }
    unsafe fn set_stencil_reference(&mut self, _faces: pso::Face, value: pso::StencilValue) {
        // Applied lazily by `bind_graphics_pipeline`; both faces share one value.
        self.cache.stencil_ref = Some(value);
    }
    unsafe fn set_stencil_read_mask(&mut self, _faces: pso::Face, value: pso::StencilValue) {
        // Cached only; both faces share one value.
        self.cache.stencil_read_mask = Some(value);
    }
    unsafe fn set_stencil_write_mask(&mut self, _faces: pso::Face, value: pso::StencilValue) {
        // Cached only; both faces share one value.
        self.cache.stencil_write_mask = Some(value);
    }
    unsafe fn set_depth_bounds(&mut self, _bounds: Range<f32>) {
        // Depth-bounds testing is not supported by this backend.
        unimplemented!()
    }
    unsafe fn set_line_width(&mut self, width: f32) {
        // D3D11 only supports 1.0-wide lines; this just validates the value.
        validate_line_width(width);
    }
    unsafe fn set_depth_bias(&mut self, _depth_bias: pso::DepthBias) {
        // TODO:
        // unimplemented!()
    }
    unsafe fn bind_graphics_pipeline(&mut self, pipeline: &GraphicsPipeline) {
        // Cache a clone of the pipeline and flush it to the context.
        self.cache.set_graphics_pipeline(pipeline.clone());
        self.cache.bind_graphics_pipeline(&self.context);
    }
    // Binds descriptor sets to the VS/PS stages by copying handle ranges from
    // the set's handle pool into the corresponding register slots, scheduling
    // coherent-memory synchronization for buffers the sets reference.
    // Dynamic offsets are not implemented yet.
    unsafe fn bind_graphics_descriptor_sets<'a, I, J>(
        &mut self,
        layout: &PipelineLayout,
        first_set: usize,
        sets: I,
        _offsets: J,
    ) where
        I: IntoIterator,
        I::Item: Borrow<DescriptorSet>,
        J: IntoIterator,
        J::Item: Borrow<command::DescriptorSetOffset>,
    {
        let _scope = debug_scope!(&self.context, "BindGraphicsDescriptorSets");
        // TODO: find a better solution to invalidating old bindings..
        self.context.CSSetUnorderedAccessViews(
            0,
            16,
            [ptr::null_mut(); 16].as_ptr(),
            ptr::null_mut(),
        );
        //let offsets: Vec<command::DescriptorSetOffset> = offsets.into_iter().map(|o| *o.borrow()).collect();
        for (set, info) in sets
            .into_iter()
            .zip(&layout.sets[first_set ..])
        {
            let set = set.borrow();
            {
                // Schedule flush/invalidate for every coherent buffer the set
                // references (deduplicated by resource pointer).
                let coherent_buffers = set.coherent_buffers.lock();
                for sync in coherent_buffers.flush_coherent_buffers.borrow().iter() {
                    // TODO: merge sync range if a flush already exists
                    if !self
                        .flush_coherent_memory
                        .iter()
                        .any(|m| m.buffer == sync.device_buffer)
                    {
                        self.flush_coherent_memory.push(MemoryFlush {
                            host_memory: sync.host_ptr,
                            sync_range: sync.range.clone(),
                            buffer: sync.device_buffer,
                        });
                    }
                }
                for sync in coherent_buffers.invalidate_coherent_buffers.borrow().iter() {
                    if !self
                        .invalidate_coherent_memory
                        .iter()
                        .any(|m| m.buffer == sync.device_buffer)
                    {
                        self.invalidate_coherent_memory.push(MemoryInvalidate {
                            working_buffer: Some(self.internal.working_buffer.clone()),
                            working_buffer_size: self.internal.working_buffer_size,
                            host_memory: sync.host_ptr,
                            sync_range: sync.range.clone(),
                            buffer: sync.device_buffer,
                        });
                    }
                }
            }
            // TODO: offsets
            if let Some(rd) = info.registers.vs.c.as_some() {
                self.context.VSSetConstantBuffers(
                    rd.res_index as u32,
                    rd.count as u32,
                    set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _,
                );
            }
            if let Some(rd) = info.registers.vs.t.as_some() {
                self.context.VSSetShaderResources(
                    rd.res_index as u32,
                    rd.count as u32,
                    set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _,
                );
            }
            if let Some(rd) = info.registers.vs.s.as_some() {
                self.context.VSSetSamplers(
                    rd.res_index as u32,
                    rd.count as u32,
                    set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _,
                );
            }
            if let Some(rd) = info.registers.ps.c.as_some() {
                self.context.PSSetConstantBuffers(
                    rd.res_index as u32,
                    rd.count as u32,
                    set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _,
                );
            }
            if let Some(rd) = info.registers.ps.t.as_some() {
                self.context.PSSetShaderResources(
                    rd.res_index as u32,
                    rd.count as u32,
                    set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _,
                );
            }
            if let Some(rd) = info.registers.ps.s.as_some() {
                self.context.PSSetSamplers(
                    rd.res_index as u32,
                    rd.count as u32,
                    set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _,
                );
            }
        }
    }
    unsafe fn bind_compute_pipeline(&mut self, pipeline: &ComputePipeline) {
        // Compute pipelines are just a CS shader; bind it directly.
        self.context
            .CSSetShader(pipeline.cs.as_raw(), ptr::null_mut(), 0);
    }
    // Compute-stage analogue of `bind_graphics_descriptor_sets`: binds CBVs,
    // SRVs, UAVs and samplers to the CS stage and schedules coherent-memory
    // synchronization. Dynamic offsets are not implemented yet.
    unsafe fn bind_compute_descriptor_sets<I, J>(
        &mut self,
        layout: &PipelineLayout,
        first_set: usize,
        sets: I,
        _offsets: J,
    ) where
        I: IntoIterator,
        I::Item: Borrow<DescriptorSet>,
        J: IntoIterator,
        J::Item: Borrow<command::DescriptorSetOffset>,
    {
        let _scope = debug_scope!(&self.context, "BindComputeDescriptorSets");
        // Invalidate stale UAV bindings before rebinding.
        self.context.CSSetUnorderedAccessViews(
            0,
            16,
            [ptr::null_mut(); 16].as_ptr(),
            ptr::null_mut(),
        );
        for (set, info) in sets
            .into_iter()
            .zip(&layout.sets[first_set ..])
        {
            let set = set.borrow();
            {
                let coherent_buffers = set.coherent_buffers.lock();
                for sync in coherent_buffers.flush_coherent_buffers.borrow().iter() {
                    if !self
                        .flush_coherent_memory
                        .iter()
                        .any(|m| m.buffer == sync.device_buffer)
                    {
                        self.flush_coherent_memory.push(MemoryFlush {
                            host_memory: sync.host_ptr,
                            sync_range: sync.range.clone(),
                            buffer: sync.device_buffer,
                        });
                    }
                }
                for sync in coherent_buffers.invalidate_coherent_buffers.borrow().iter() {
                    if !self
                        .invalidate_coherent_memory
                        .iter()
                        .any(|m| m.buffer == sync.device_buffer)
                    {
                        self.invalidate_coherent_memory.push(MemoryInvalidate {
                            working_buffer: Some(self.internal.working_buffer.clone()),
                            working_buffer_size: self.internal.working_buffer_size,
                            host_memory: sync.host_ptr,
                            sync_range: sync.range.clone(),
                            buffer: sync.device_buffer,
                        });
                    }
                }
            }
            // TODO: offsets
            if let Some(rd) = info.registers.cs.c.as_some() {
                self.context.CSSetConstantBuffers(
                    rd.res_index as u32,
                    rd.count as u32,
                    set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _,
                );
            }
            if let Some(rd) = info.registers.cs.t.as_some() {
                self.context.CSSetShaderResources(
                    rd.res_index as u32,
                    rd.count as u32,
                    set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _,
                );
            }
            if let Some(rd) = info.registers.cs.u.as_some() {
                self.context.CSSetUnorderedAccessViews(
                    rd.res_index as u32,
                    rd.count as u32,
                    set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _,
                    ptr::null_mut(),
                );
            }
            if let Some(rd) = info.registers.cs.s.as_some() {
                self.context.CSSetSamplers(
                    rd.res_index as u32,
                    rd.count as u32,
                    set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _,
                );
            }
        }
    }
    /// Records a compute dispatch of `count` thread groups (x, y, z).
    unsafe fn dispatch(&mut self, count: WorkGroupCount) {
        self.context.Dispatch(count[0], count[1], count[2]);
    }
    /// Indirect dispatch is not implemented by this backend yet.
    unsafe fn dispatch_indirect(&mut self, _buffer: &Buffer, _offset: buffer::Offset) {
        unimplemented!()
    }
    /// Buffer fill is not implemented by this backend yet.
    unsafe fn fill_buffer<R>(&mut self, _buffer: &Buffer, _range: R, _data: u32)
    where
        R: RangeArg<buffer::Offset>,
    {
        unimplemented!()
    }
    /// Inline buffer update is not implemented by this backend yet.
    unsafe fn update_buffer(&mut self, _buffer: &Buffer, _offset: buffer::Offset, _data: &[u8]) {
        unimplemented!()
    }
unsafe fn copy_buffer<T>(&mut self, src: &Buffer, dst: &Buffer, regions: T)
where
T: IntoIterator,
T::Item: Borrow<command::BufferCopy>,
{
if src.properties.contains(memory::Properties::COHERENT) {
self.defer_coherent_flush(src);
}
for region in regions.into_iter() {
let info = region.borrow();
let dst_box = d3d11::D3D11_BOX {
left: info.src as _,
top: 0,
front: 0,
right: (info.src + info.size) as _,
bottom: 1,
back: 1,
};
self.context.CopySubresourceRegion(
dst.internal.raw as _,
0,
info.dst as _,
0,
0,
src.internal.raw as _,
0,
&dst_box,
);
if let Some(disjoint_cb) = dst.internal.disjoint_cb {
self.context.CopySubresourceRegion(
disjoint_cb as _,
0,
info.dst as _,
0,
0,
src.internal.raw as _,
0,
&dst_box,
);
}
}
}
    /// Records image-to-image copies; delegates to the shared internal 2D copy
    /// path (layouts are irrelevant on DX11 and ignored).
    unsafe fn copy_image<T>(
        &mut self,
        src: &Image,
        _: image::Layout,
        dst: &Image,
        _: image::Layout,
        regions: T,
    ) where
        T: IntoIterator,
        T::Item: Borrow<command::ImageCopy>,
    {
        self.internal
            .copy_image_2d(&self.context, src, dst, regions);
    }
    /// Records buffer-to-image copies. Coherent source buffers are queued for a
    /// host->device flush before execution; the copy itself is delegated to the
    /// shared internal helper.
    unsafe fn copy_buffer_to_image<T>(
        &mut self,
        buffer: &Buffer,
        image: &Image,
        _: image::Layout,
        regions: T,
    ) where
        T: IntoIterator,
        T::Item: Borrow<command::BufferImageCopy>,
    {
        if buffer.properties.contains(memory::Properties::COHERENT) {
            self.defer_coherent_flush(buffer);
        }
        self.internal
            .copy_buffer_to_image_2d(&self.context, buffer, image, regions);
    }
    /// Records image-to-buffer copies. Coherent destination buffers are queued
    /// for a device->host invalidate after execution.
    unsafe fn copy_image_to_buffer<T>(
        &mut self,
        image: &Image,
        _: image::Layout,
        buffer: &Buffer,
        regions: T,
    ) where
        T: IntoIterator,
        T::Item: Borrow<command::BufferImageCopy>,
    {
        if buffer.properties.contains(memory::Properties::COHERENT) {
            self.defer_coherent_invalidate(buffer);
        }
        self.internal
            .copy_image_2d_to_buffer(&self.context, image, buffer, regions);
    }
    /// Records a non-indexed, instanced draw. gfx-hal passes half-open ranges,
    /// so the counts are `end - start` and the starts become the base offsets.
    unsafe fn draw(&mut self, vertices: Range<VertexCount>, instances: Range<InstanceCount>) {
        self.context.DrawInstanced(
            vertices.end - vertices.start,
            instances.end - instances.start,
            vertices.start,
            instances.start,
        );
    }
    /// Records an indexed, instanced draw; `base_vertex` is added to every
    /// index value read from the index buffer.
    unsafe fn draw_indexed(
        &mut self,
        indices: Range<IndexCount>,
        base_vertex: VertexOffset,
        instances: Range<InstanceCount>,
    ) {
        self.context.DrawIndexedInstanced(
            indices.end - indices.start,
            instances.end - instances.start,
            indices.start,
            base_vertex,
            instances.start,
        );
    }
    // The following commands (indirect draws, events, queries, timestamps) are
    // not implemented by this backend yet and panic if recorded.
    unsafe fn draw_indirect(
        &mut self,
        _buffer: &Buffer,
        _offset: buffer::Offset,
        _draw_count: DrawCount,
        _stride: u32,
    ) {
        unimplemented!()
    }
    unsafe fn draw_indexed_indirect(
        &mut self,
        _buffer: &Buffer,
        _offset: buffer::Offset,
        _draw_count: DrawCount,
        _stride: u32,
    ) {
        unimplemented!()
    }
    unsafe fn set_event(&mut self, _: &(), _: pso::PipelineStage) {
        unimplemented!()
    }
    unsafe fn reset_event(&mut self, _: &(), _: pso::PipelineStage) {
        unimplemented!()
    }
    unsafe fn wait_events<'a, I, J>(&mut self, _: I, _: Range<pso::PipelineStage>, _: J)
    where
        I: IntoIterator,
        I::Item: Borrow<()>,
        J: IntoIterator,
        J::Item: Borrow<memory::Barrier<'a, Backend>>,
    {
        unimplemented!()
    }
    unsafe fn begin_query(&mut self, _query: query::Query<Backend>, _flags: query::ControlFlags) {
        unimplemented!()
    }
    unsafe fn end_query(&mut self, _query: query::Query<Backend>) {
        unimplemented!()
    }
    unsafe fn reset_query_pool(&mut self, _pool: &QueryPool, _queries: Range<query::Id>) {
        unimplemented!()
    }
    unsafe fn copy_query_pool_results(
        &mut self,
        _pool: &QueryPool,
        _queries: Range<query::Id>,
        _buffer: &Buffer,
        _offset: buffer::Offset,
        _stride: buffer::Offset,
        _flags: query::ResultFlags,
    ) {
        unimplemented!()
    }
    unsafe fn write_timestamp(&mut self, _: pso::PipelineStage, _query: query::Query<Backend>) {
        unimplemented!()
    }
    /// Push constants for graphics stages.
    /// NOTE(review): this silently no-ops (the `unimplemented!()` is commented
    /// out) while the compute variant below panics — inconsistent; confirm
    /// whether the no-op is intentional before relying on it.
    unsafe fn push_graphics_constants(
        &mut self,
        _layout: &PipelineLayout,
        _stages: pso::ShaderStageFlags,
        _offset: u32,
        _constants: &[u32],
    ) {
        // unimplemented!()
    }
    /// Push constants for compute — not implemented yet.
    unsafe fn push_compute_constants(
        &mut self,
        _layout: &PipelineLayout,
        _offset: u32,
        _constants: &[u32],
    ) {
        unimplemented!()
    }
    /// Secondary command buffer execution — not implemented yet.
    unsafe fn execute_commands<'a, T, I>(&mut self, _buffers: I)
    where
        T: 'a + Borrow<CommandBuffer>,
        I: IntoIterator<Item = &'a T>,
    {
        unimplemented!()
    }
}
#[derive(Clone, Debug)]
/// Byte span of a buffer affected by a host->device flush.
enum SyncRange {
    /// Flush the entire buffer (used for constant buffers, which DX11 cannot
    /// partially update on this path).
    Whole,
    /// Flush only the given byte range, relative to the buffer start.
    Partial(Range<u64>),
}
/// A pending host->device copy: upload bytes at `host_memory` into `buffer`
/// over `sync_range`.
#[derive(Debug)]
pub struct MemoryFlush {
    host_memory: *mut u8,
    sync_range: SyncRange,
    buffer: *mut d3d11::ID3D11Buffer,
}
/// A pending device->host readback: copy `sync_range` of `buffer` into host
/// memory, staged through the CPU-readable `working_buffer` in chunks of
/// `working_buffer_size`. `host_memory` is the base pointer of the whole
/// memory allocation (the range offset is applied when writing).
pub struct MemoryInvalidate {
    working_buffer: Option<ComPtr<d3d11::ID3D11Buffer>>,
    working_buffer_size: u64,
    host_memory: *mut u8,
    sync_range: Range<u64>,
    buffer: *mut d3d11::ID3D11Buffer,
}
// Manual impl because raw pointers/ComPtr fields don't derive Debug usefully.
impl fmt::Debug for MemoryInvalidate {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("MemoryInvalidate")
    }
}
/// Computes the overlap of two half-open ranges.
///
/// Returns `None` when the ranges are disjoint, *including* the case where
/// they merely touch (`a.end == b.start`). The previous version returned a
/// `Some(empty_range)` for touching ranges; downstream, an empty
/// `SyncRange::Partial` fails `do_flush`'s `start < end` guard and degrades
/// into a whole-buffer `UpdateSubresource`, so adjacent-but-non-overlapping
/// buffers were needlessly flushed in full.
fn intersection(a: &Range<u64>, b: &Range<u64>) -> Option<Range<u64>> {
    let start = a.start.max(b.start);
    let end = a.end.min(b.end);
    if start < end {
        Some(start .. end)
    } else {
        None
    }
}
impl MemoryFlush {
    /// Uploads the staged host bytes into the device buffer via
    /// `UpdateSubresource`. A `Partial` non-empty range becomes a 1D
    /// `D3D11_BOX`; `Whole` (or an empty partial range) passes a null box,
    /// which updates the entire resource.
    fn do_flush(&self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
        let src = self.host_memory;
        debug_marker!(context, "Flush({:?})", self.sync_range);
        let region = match self.sync_range {
            SyncRange::Partial(ref range) if range.start < range.end => Some(d3d11::D3D11_BOX {
                left: range.start as u32,
                top: 0,
                front: 0,
                right: range.end as u32,
                bottom: 1,
                back: 1,
            }),
            _ => None,
        };
        unsafe {
            context.UpdateSubresource(
                self.buffer as _,
                0,
                if let Some(region) = region {
                    &region
                } else {
                    ptr::null_mut()
                },
                src as _,
                0,
                0,
            );
        }
    }
}
impl MemoryInvalidate {
    /// Copies `range` (in device-buffer byte coordinates) from `buffer` into
    /// the CPU-readable working buffer, then maps the working buffer and
    /// copies the bytes into host memory at the same offset.
    /// `range` must not be longer than the working buffer.
    fn download(
        &self,
        context: &ComPtr<d3d11::ID3D11DeviceContext>,
        buffer: *mut d3d11::ID3D11Buffer,
        range: Range<u64>,
    ) {
        unsafe {
            context.CopySubresourceRegion(
                self.working_buffer.clone().unwrap().as_raw() as _,
                0,
                0,
                0,
                0,
                buffer as _,
                0,
                &d3d11::D3D11_BOX {
                    left: range.start as _,
                    top: 0,
                    front: 0,
                    right: range.end as _,
                    bottom: 1,
                    back: 1,
                },
            );
            // copy over to our vec
            let dst = self.host_memory.offset(range.start as isize);
            let src = self.map(&context);
            ptr::copy(src, dst, (range.end - range.start) as usize);
            self.unmap(&context);
        }
    }
    /// Reads back `self.sync_range` from the device buffer, split into chunks
    /// no larger than the staging (working) buffer.
    fn do_invalidate(&self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
        let stride = self.working_buffer_size;
        let range = &self.sync_range;
        let len = range.end - range.start;
        let chunks = len / stride;
        let remainder = len % stride;
        // we split up the copies into chunks the size of our working buffer
        for i in 0 .. chunks {
            let offset = range.start + i * stride;
            let range = offset .. (offset + stride);
            self.download(context, self.buffer, range);
        }
        if remainder != 0 {
            // BUGFIX: the tail chunk starts where the last full chunk ended,
            // i.e. relative to `range.start`. The previous code used
            // `chunks * stride` alone, which for `range.start > 0` read the
            // wrong bytes and produced a span longer than the working buffer.
            self.download(
                context,
                self.buffer,
                (range.start + chunks * stride) .. range.end,
            );
        }
    }
    /// Maps the working buffer for CPU reads; panics if mapping fails or no
    /// working buffer is attached.
    fn map(&self, context: &ComPtr<d3d11::ID3D11DeviceContext>) -> *mut u8 {
        assert_eq!(self.working_buffer.is_some(), true);
        unsafe {
            let mut map = mem::zeroed();
            let hr = context.Map(
                self.working_buffer.clone().unwrap().as_raw() as _,
                0,
                d3d11::D3D11_MAP_READ,
                0,
                &mut map,
            );
            assert_eq!(hr, winerror::S_OK);
            map.pData as _
        }
    }
    /// Releases the CPU mapping taken by `map`.
    fn unmap(&self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
        unsafe {
            context.Unmap(self.working_buffer.clone().unwrap().as_raw() as _, 0);
        }
    }
}
// Since we dont have any heaps to work with directly, everytime we bind a
// buffer/image to memory we allocate a dx11 resource and assign it a range.
//
// `HOST_VISIBLE` memory gets a `Vec<u8>` which covers the entire memory
// range. This forces us to only expose non-coherent memory, as this
// abstraction acts as a "cache" since the "staging buffer" vec is disjoint
// from all the dx11 resources we store in the struct.
pub struct Memory {
    // memory properties this allocation was created with
    properties: memory::Properties,
    // total size of the allocation, in bytes
    size: u64,
    // base pointer of the host-side shadow copy (valid when HOST_VISIBLE)
    mapped_ptr: *mut u8,
    // staging buffer covering the whole memory region, if it's HOST_VISIBLE
    host_visible: Option<RefCell<Vec<u8>>>,
    // list of all buffers bound to this memory
    local_buffers: RefCell<Vec<(Range<u64>, InternalBuffer)>>,
    // list of all images bound to this memory
    _local_images: RefCell<Vec<(Range<u64>, InternalImage)>>,
}
impl fmt::Debug for Memory {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("Memory")
    }
}
// NOTE(review): Send/Sync are asserted despite raw pointers and RefCell
// fields — presumably callers serialize access externally; confirm before
// sharing a Memory across threads.
unsafe impl Send for Memory {}
unsafe impl Sync for Memory {}
impl Memory {
    /// Resolves a `RangeArg` into a concrete byte range, defaulting an open
    /// start to 0 and an open end to the allocation size.
    pub fn resolve<R: RangeArg<u64>>(&self, range: &R) -> Range<u64> {
        *range.start().unwrap_or(&0) .. *range.end().unwrap_or(&self.size)
    }
    /// Registers a buffer as occupying `range` of this allocation so later
    /// flush/invalidate calls can find it.
    pub fn bind_buffer(&self, range: Range<u64>, buffer: InternalBuffer) {
        self.local_buffers.borrow_mut().push((range, buffer));
    }
    /// Uploads host writes in `range` to every bound buffer that overlaps it.
    pub fn flush(&self, context: &ComPtr<d3d11::ID3D11DeviceContext>, range: Range<u64>) {
        use buffer::Usage;
        for &(ref buffer_range, ref buffer) in self.local_buffers.borrow().iter() {
            if let Some(range) = intersection(&range, &buffer_range) {
                let ptr = self.mapped_ptr;
                // we need to handle 3 cases for updating buffers:
                //
                // 1. if our buffer was created as a `UNIFORM` buffer *and* other usage flags, we
                //    also have a disjoint buffer which only has `D3D11_BIND_CONSTANT_BUFFER` due
                //    to DX11 limitation. we then need to update both the original buffer and the
                //    disjoint one with the *whole* range (TODO: allow for partial updates)
                //
                // 2. if our buffer was created with *only* `UNIFORM` usage we need to upload
                //    the whole range (TODO: allow for partial updates)
                //
                // 3. the general case, without any `UNIFORM` usage has no restrictions on
                //    partial updates, so we upload the specified range
                //
                if buffer.usage.contains(Usage::UNIFORM) && buffer.usage != Usage::UNIFORM {
                    MemoryFlush {
                        host_memory: unsafe { ptr.offset(buffer_range.start as _) },
                        sync_range: SyncRange::Whole,
                        buffer: buffer.raw,
                    }
                    .do_flush(&context);
                    if let Some(disjoint) = buffer.disjoint_cb {
                        MemoryFlush {
                            host_memory: unsafe { ptr.offset(buffer_range.start as _) },
                            sync_range: SyncRange::Whole,
                            buffer: disjoint,
                        }
                        .do_flush(&context);
                    }
                } else if buffer.usage == Usage::UNIFORM {
                    MemoryFlush {
                        host_memory: unsafe { ptr.offset(buffer_range.start as _) },
                        sync_range: SyncRange::Whole,
                        buffer: buffer.raw,
                    }
                    .do_flush(&context);
                } else {
                    // Translate the allocation-relative range into a
                    // buffer-relative range for the partial update.
                    let local_start = range.start - buffer_range.start;
                    let local_len = range.end - range.start;
                    MemoryFlush {
                        host_memory: unsafe { ptr.offset(range.start as _) },
                        sync_range: SyncRange::Partial(local_start .. (local_start + local_len)),
                        buffer: buffer.raw,
                    }
                    .do_flush(&context);
                }
            }
        }
    }
    /// Reads back `range` from every bound buffer that overlaps it, staging
    /// through `working_buffer` (in chunks of `working_buffer_size`).
    pub fn invalidate(
        &self,
        context: &ComPtr<d3d11::ID3D11DeviceContext>,
        range: Range<u64>,
        working_buffer: ComPtr<d3d11::ID3D11Buffer>,
        working_buffer_size: u64,
    ) {
        for &(ref buffer_range, ref buffer) in self.local_buffers.borrow().iter() {
            if let Some(range) = intersection(&range, &buffer_range) {
                MemoryInvalidate {
                    working_buffer: Some(working_buffer.clone()),
                    working_buffer_size,
                    host_memory: self.mapped_ptr,
                    sync_range: range.clone(),
                    buffer: buffer.raw,
                }
                .do_invalidate(&context);
            }
        }
    }
}
#[derive(Debug)]
/// Command pool; command buffers are created as deferred contexts on demand,
/// so the pool itself only carries the device and shared internal state.
pub struct CommandPool {
    device: ComPtr<d3d11::ID3D11Device>,
    internal: internal::Internal,
}
// NOTE(review): asserted despite the COM pointer inside — presumably the
// device interface is free-threaded; confirm.
unsafe impl Send for CommandPool {}
unsafe impl Sync for CommandPool {}
impl hal::pool::CommandPool<Backend> for CommandPool {
    /// No pooled state to reset; command buffers manage their own contexts.
    unsafe fn reset(&mut self, _release_resources: bool) {
        //unimplemented!()
    }
    /// Creates a new deferred-context command buffer (level is ignored).
    unsafe fn allocate_one(&mut self, _level: command::Level) -> CommandBuffer {
        CommandBuffer::create_deferred(self.device.clone(), self.internal.clone())
    }
    /// Dropping the command buffers releases their resources; nothing extra
    /// to do here yet.
    unsafe fn free<I>(&mut self, _cbufs: I)
    where
        I: IntoIterator<Item = CommandBuffer>,
    {
        // TODO:
        // unimplemented!()
    }
}
/// Similarly to the dx12 backend, we can handle either precompiled dxbc or spirv.
pub enum ShaderModule {
    /// Precompiled DXBC bytecode, passed through as-is.
    Dxbc(Vec<u8>),
    /// SPIR-V words, cross-compiled to HLSL at pipeline creation.
    Spirv(Vec<u32>),
}
// TODO: temporary
impl fmt::Debug for ShaderModule {
    /// Opaque debug formatting — the bytecode payload is not useful to print.
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        f.write_str("ShaderModule { ... }")
    }
}
unsafe impl Send for ShaderModule {}
unsafe impl Sync for ShaderModule {}
#[derive(Clone, Debug)]
/// Attachment references used by one subpass, grouped by role.
pub struct SubpassDesc {
    pub color_attachments: Vec<pass::AttachmentRef>,
    pub depth_stencil_attachment: Option<pass::AttachmentRef>,
    pub input_attachments: Vec<pass::AttachmentRef>,
    pub resolve_attachments: Vec<pass::AttachmentRef>,
}
impl SubpassDesc {
    /// Returns `true` when this subpass references attachment `at_id` in any
    /// role (color, depth/stencil, input, or resolve).
    pub(crate) fn is_using(&self, at_id: pass::AttachmentId) -> bool {
        let color = self.color_attachments.iter();
        let depth_stencil = self.depth_stencil_attachment.iter();
        let input = self.input_attachments.iter();
        let resolve = self.resolve_attachments.iter();
        color
            .chain(depth_stencil)
            .chain(input)
            .chain(resolve)
            .any(|&(id, _)| id == at_id)
    }
}
#[derive(Clone, Debug)]
/// Render pass description: the attachment list plus per-subpass references.
pub struct RenderPass {
    pub attachments: Vec<pass::Attachment>,
    pub subpasses: Vec<SubpassDesc>,
}
#[derive(Clone, Debug)]
/// Framebuffer: concrete image views bound to a render pass, plus layer count.
pub struct Framebuffer {
    attachments: Vec<ImageView>,
    layers: image::Layer,
}
#[derive(Clone, Debug)]
/// Raw D3D11 buffer plus optional views, shared between `Buffer` handles.
pub struct InternalBuffer {
    raw: *mut d3d11::ID3D11Buffer,
    // TODO: need to sync between `raw` and `disjoint_cb`, same way as we do with
    // `MemoryFlush/Invalidate`
    disjoint_cb: Option<*mut d3d11::ID3D11Buffer>, // if unbound this buffer might be null.
    srv: Option<*mut d3d11::ID3D11ShaderResourceView>,
    uav: Option<*mut d3d11::ID3D11UnorderedAccessView>,
    usage: buffer::Usage,
}
/// gfx-hal buffer handle; fields marked "if unbound" are placeholders until
/// the buffer is bound to memory.
pub struct Buffer {
    internal: InternalBuffer,
    properties: memory::Properties, // empty if unbound
    host_ptr: *mut u8,              // null if unbound
    bound_range: Range<u64>,        // 0 if unbound
    requirements: memory::Requirements,
    bind: d3d11::D3D11_BIND_FLAG,
}
impl fmt::Debug for Buffer {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("Buffer")
    }
}
// NOTE(review): asserted despite raw pointers — confirm external
// synchronization expectations before cross-thread use.
unsafe impl Send for Buffer {}
unsafe impl Sync for Buffer {}
#[derive(Debug)]
/// Buffer views are not supported/needed by this backend; zero-sized marker.
pub struct BufferView;
/// gfx-hal image handle: creation parameters plus the raw resource and views.
pub struct Image {
    kind: image::Kind,
    usage: image::Usage,
    format: format::Format,
    view_caps: image::ViewCapabilities,
    decomposed_format: conv::DecomposedDxgiFormat,
    mip_levels: image::Level,
    internal: InternalImage,
    bind: d3d11::D3D11_BIND_FLAG,
    requirements: memory::Requirements,
}
impl fmt::Debug for Image {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("Image")
    }
}
/// Raw resource plus pre-created per-subresource views.
pub struct InternalImage {
    raw: *mut d3d11::ID3D11Resource,
    copy_srv: Option<ComPtr<d3d11::ID3D11ShaderResourceView>>,
    srv: Option<ComPtr<d3d11::ID3D11ShaderResourceView>>,
    /// Contains UAVs for all subresources
    unordered_access_views: Vec<ComPtr<d3d11::ID3D11UnorderedAccessView>>,
    /// Contains DSVs for all subresources
    depth_stencil_views: Vec<ComPtr<d3d11::ID3D11DepthStencilView>>,
    /// Contains RTVs for all subresources
    render_target_views: Vec<ComPtr<d3d11::ID3D11RenderTargetView>>,
}
impl fmt::Debug for InternalImage {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("InternalImage")
    }
}
unsafe impl Send for Image {}
unsafe impl Sync for Image {}
impl Image {
    /// D3D11 subresource index: `mip + layer * mip_levels`.
    pub fn calc_subresource(&self, mip_level: UINT, layer: UINT) -> UINT {
        mip_level + (layer * self.mip_levels as UINT)
    }
    /// UAV for the given mip level.
    /// NOTE(review): `layer` is ignored (0 is always used) — presumably UAVs
    /// are only stored per mip; confirm against view creation.
    pub fn get_uav(
        &self,
        mip_level: image::Level,
        _layer: image::Layer,
    ) -> Option<&ComPtr<d3d11::ID3D11UnorderedAccessView>> {
        self.internal
            .unordered_access_views
            .get(self.calc_subresource(mip_level as _, 0) as usize)
    }
    /// DSV for the given mip level and array layer.
    pub fn get_dsv(
        &self,
        mip_level: image::Level,
        layer: image::Layer,
    ) -> Option<&ComPtr<d3d11::ID3D11DepthStencilView>> {
        self.internal
            .depth_stencil_views
            .get(self.calc_subresource(mip_level as _, layer as _) as usize)
    }
    /// RTV for the given mip level and array layer.
    pub fn get_rtv(
        &self,
        mip_level: image::Level,
        layer: image::Layer,
    ) -> Option<&ComPtr<d3d11::ID3D11RenderTargetView>> {
        self.internal
            .render_target_views
            .get(self.calc_subresource(mip_level as _, layer as _) as usize)
    }
}
#[derive(Clone)]
/// Image view: at most one of each D3D11 view kind, depending on usage.
pub struct ImageView {
    format: format::Format,
    rtv_handle: Option<ComPtr<d3d11::ID3D11RenderTargetView>>,
    srv_handle: Option<ComPtr<d3d11::ID3D11ShaderResourceView>>,
    dsv_handle: Option<ComPtr<d3d11::ID3D11DepthStencilView>>,
    uav_handle: Option<ComPtr<d3d11::ID3D11UnorderedAccessView>>,
}
impl fmt::Debug for ImageView {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("ImageView")
    }
}
unsafe impl Send for ImageView {}
unsafe impl Sync for ImageView {}
/// Wrapper around a D3D11 sampler state object.
pub struct Sampler {
    sampler_handle: ComPtr<d3d11::ID3D11SamplerState>,
}
impl fmt::Debug for Sampler {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("Sampler")
    }
}
unsafe impl Send for Sampler {}
unsafe impl Sync for Sampler {}
/// Compute pipeline: just the compiled compute shader.
pub struct ComputePipeline {
    cs: ComPtr<d3d11::ID3D11ComputeShader>,
}
impl fmt::Debug for ComputePipeline {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("ComputePipeline")
    }
}
unsafe impl Send for ComputePipeline {}
unsafe impl Sync for ComputePipeline {}
/// NOTE: some objects are hashed internally and reused when created with the
/// same params[0], need to investigate which interfaces this applies
/// to.
///
/// [0]: https://msdn.microsoft.com/en-us/library/windows/desktop/ff476500(v=vs.85).aspx
#[derive(Clone)]
/// Graphics pipeline: per-stage shaders (only VS is mandatory) plus the fixed
/// function state objects and vertex-input metadata needed at bind time.
pub struct GraphicsPipeline {
    vs: ComPtr<d3d11::ID3D11VertexShader>,
    gs: Option<ComPtr<d3d11::ID3D11GeometryShader>>,
    hs: Option<ComPtr<d3d11::ID3D11HullShader>>,
    ds: Option<ComPtr<d3d11::ID3D11DomainShader>>,
    ps: Option<ComPtr<d3d11::ID3D11PixelShader>>,
    topology: d3d11::D3D11_PRIMITIVE_TOPOLOGY,
    input_layout: ComPtr<d3d11::ID3D11InputLayout>,
    rasterizer_state: ComPtr<d3d11::ID3D11RasterizerState>,
    blend_state: ComPtr<d3d11::ID3D11BlendState>,
    // depth/stencil state paired with the (possibly dynamic) stencil reference
    depth_stencil_state: Option<(
        ComPtr<d3d11::ID3D11DepthStencilState>,
        pso::State<pso::StencilValue>,
    )>,
    baked_states: pso::BakedStates,
    required_bindings: u32,
    max_vertex_bindings: u32,
    // vertex buffer strides indexed by binding slot
    strides: Vec<u32>,
}
impl fmt::Debug for GraphicsPipeline {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("GraphicsPipeline")
    }
}
unsafe impl Send for GraphicsPipeline {}
unsafe impl Sync for GraphicsPipeline {}
/// Index of a shader register slot (b#/t#/u#/s#).
type ResourceIndex = u8;
/// Index into the descriptor pool's flat handle array.
type DescriptorIndex = u16;
#[derive(Clone, Debug, Default)]
/// One value per D3D11 register class.
struct RegisterData<T> {
    // CBV
    c: T,
    // SRV
    t: T,
    // UAV
    u: T,
    // Sampler
    s: T,
}
impl<T> RegisterData<T> {
    /// Applies `fun` to each register-class value, producing a new
    /// `RegisterData` of the results.
    fn map<U, F: Fn(&T) -> U>(
        &self,
        fun: F,
    ) -> RegisterData<U> {
        RegisterData {
            c: fun(&self.c),
            t: fun(&self.t),
            u: fun(&self.u),
            s: fun(&self.s),
        }
    }
}
impl RegisterData<DescriptorIndex> {
    /// Adds `many` to the counter of each register class that `content` uses.
    fn add_content_many(&mut self, content: DescriptorContent, many: DescriptorIndex) {
        if content.contains(DescriptorContent::CBV) {
            self.c += many;
        }
        if content.contains(DescriptorContent::SRV) {
            self.t += many;
        }
        if content.contains(DescriptorContent::UAV) {
            self.u += many;
        }
        if content.contains(DescriptorContent::SAMPLER) {
            self.s += many;
        }
    }
    /// Convenience wrapper for a single descriptor.
    fn add_content(&mut self, content: DescriptorContent) {
        self.add_content_many(content, 1)
    }
    /// Total descriptor count across all register classes.
    fn sum(&self) -> DescriptorIndex {
        self.c + self.t + self.u + self.s
    }
}
#[derive(Clone, Debug, Default)]
/// One value per shader stage this backend tracks (vertex, pixel, compute).
struct MultiStageData<T> {
    vs: T,
    ps: T,
    cs: T,
}
impl<T> MultiStageData<T> {
    /// Consumes self and returns the value for `stage`; panics for stages this
    /// backend does not track (geometry/hull/domain).
    fn select(self, stage: pso::Stage) -> T {
        match stage {
            pso::Stage::Vertex => self.vs,
            pso::Stage::Fragment => self.ps,
            pso::Stage::Compute => self.cs,
            _ => panic!("Unsupported stage {:?}", stage)
        }
    }
}
impl<T> MultiStageData<RegisterData<T>> {
    /// Applies `fun` to every per-register value of every stage.
    fn map_register<U, F: Fn(&T) -> U>(
        &self,
        fun: F,
    ) -> MultiStageData<RegisterData<U>> {
        MultiStageData {
            vs: self.vs.map(&fun),
            ps: self.ps.map(&fun),
            cs: self.cs.map(&fun),
        }
    }
    /// Applies `fun` to the whole `RegisterData` of each stage.
    fn map_other<U, F: Fn(&RegisterData<T>) -> U>(
        &self,
        fun: F,
    ) -> MultiStageData<U> {
        MultiStageData {
            vs: fun(&self.vs),
            ps: fun(&self.ps),
            cs: fun(&self.cs),
        }
    }
}
impl MultiStageData<RegisterData<DescriptorIndex>> {
    /// Adds `content`'s register usage to every stage listed in `stages`.
    fn add_content(&mut self, content: DescriptorContent, stages: pso::ShaderStageFlags) {
        if stages.contains(pso::ShaderStageFlags::VERTEX) {
            self.vs.add_content(content);
        }
        if stages.contains(pso::ShaderStageFlags::FRAGMENT) {
            self.ps.add_content(content);
        }
        if stages.contains(pso::ShaderStageFlags::COMPUTE) {
            self.cs.add_content(content);
        }
    }
    /// Total descriptor count across all stages and register classes.
    fn sum(&self) -> DescriptorIndex {
        self.vs.sum() + self.ps.sum() + self.cs.sum()
    }
}
#[derive(Clone, Debug, Default)]
/// A register class's slice of the descriptor pool: starting pool offset and
/// how many handles it spans.
struct RegisterPoolMapping {
    offset: DescriptorIndex,
    count: ResourceIndex,
}
#[derive(Clone, Debug, Default)]
/// Resolved binding info for one register class of one set: the first shader
/// register, the pool offset of its handles, and the handle count.
struct RegisterInfo {
    res_index: ResourceIndex,
    pool_offset: DescriptorIndex,
    count: ResourceIndex,
}
impl RegisterInfo {
    /// Returns `Some(self)` only when this register class is actually used
    /// (non-zero count); lets callers skip empty bindings with `if let`.
    fn as_some(&self) -> Option<&Self> {
        if self.count == 0 {
            None
        } else {
            Some(self)
        }
    }
}
#[derive(Clone, Debug, Default)]
/// Running counter of shader registers consumed while laying out sets.
struct RegisterAccumulator {
    res_index: ResourceIndex,
}
impl RegisterAccumulator {
    /// Emits a pool mapping starting at `cur_offset` for the registers counted
    /// so far, and advances the shared pool offset.
    fn to_mapping(
        &self,
        cur_offset: &mut DescriptorIndex,
    ) -> RegisterPoolMapping {
        let offset = *cur_offset;
        *cur_offset += self.res_index as DescriptorIndex;
        RegisterPoolMapping {
            offset,
            count: self.res_index,
        }
    }
    /// Consumes `mapping.count` registers and returns the resolved info for
    /// the set that occupied them.
    fn advance(
        &mut self,
        mapping: &RegisterPoolMapping,
    ) -> RegisterInfo {
        let res_index = self.res_index;
        self.res_index += mapping.count;
        RegisterInfo {
            res_index,
            pool_offset: mapping.offset,
            count: mapping.count,
        }
    }
}
impl RegisterData<RegisterAccumulator> {
    /// Lays out all four register classes consecutively in the pool, advancing
    /// `pool_offset` past each.
    fn to_mapping(
        &self,
        pool_offset: &mut DescriptorIndex,
    ) -> RegisterData<RegisterPoolMapping> {
        RegisterData {
            c: self.c.to_mapping(pool_offset),
            t: self.t.to_mapping(pool_offset),
            u: self.u.to_mapping(pool_offset),
            s: self.s.to_mapping(pool_offset),
        }
    }
    /// Advances all four register-class accumulators by one set's mapping.
    fn advance(
        &mut self,
        mapping: &RegisterData<RegisterPoolMapping>,
    ) -> RegisterData<RegisterInfo> {
        RegisterData {
            c: self.c.advance(&mapping.c),
            t: self.t.advance(&mapping.t),
            u: self.u.advance(&mapping.u),
            s: self.s.advance(&mapping.s),
        }
    }
}
impl MultiStageData<RegisterData<RegisterAccumulator>> {
    /// Lays out all stages back-to-back in a single flat pool, starting at 0.
    fn to_mapping(&self) -> MultiStageData<RegisterData<RegisterPoolMapping>> {
        let mut pool_offset = 0;
        MultiStageData {
            vs: self.vs.to_mapping(&mut pool_offset),
            ps: self.ps.to_mapping(&mut pool_offset),
            cs: self.cs.to_mapping(&mut pool_offset),
        }
    }
    /// Advances every stage's accumulators by one set's mapping.
    fn advance(
        &mut self,
        mapping: &MultiStageData<RegisterData<RegisterPoolMapping>>,
    ) -> MultiStageData<RegisterData<RegisterInfo>> {
        MultiStageData {
            vs: self.vs.advance(&mapping.vs),
            ps: self.ps.advance(&mapping.ps),
            cs: self.cs.advance(&mapping.cs),
        }
    }
}
#[derive(Clone, Debug)]
/// Per-set slice of a pipeline layout: the set's bindings plus its resolved
/// register ranges for every stage.
struct DescriptorSetInfo {
    bindings: Arc<Vec<pso::DescriptorSetLayoutBinding>>,
    registers: MultiStageData<RegisterData<RegisterInfo>>,
}
impl DescriptorSetInfo {
    /// Finds the register offsets of `binding_index` for `stage` by walking
    /// the bindings in declaration order and accumulating register usage.
    /// Panics if the binding is not part of this set.
    fn find_register(
        &self,
        stage: pso::Stage,
        binding_index: pso::DescriptorBinding,
    ) -> (DescriptorContent, RegisterData<ResourceIndex>) {
        let mut res_offsets = self.registers
            .map_register(|info| info.res_index as DescriptorIndex)
            .select(stage);
        for binding in self.bindings.iter() {
            let content = DescriptorContent::from(binding.ty);
            if binding.binding == binding_index {
                return (content, res_offsets.map(|offset| *offset as ResourceIndex))
            }
            res_offsets.add_content(content);
        }
        panic!("Unable to find binding {:?}", binding_index);
    }
}
/// The pipeline layout holds optimized (less api calls) ranges of objects for all descriptor sets
/// belonging to the pipeline object.
#[derive(Debug)]
pub struct PipelineLayout {
    // one entry per descriptor set, in set-index order
    sets: Vec<DescriptorSetInfo>,
}
/// The descriptor set layout contains mappings from a given binding to the offset in our
/// descriptor pool storage and what type of descriptor it is (combined image sampler takes up two
/// handles).
#[derive(Debug)]
pub struct DescriptorSetLayout {
    bindings: Arc<Vec<pso::DescriptorSetLayoutBinding>>,
    pool_mapping: MultiStageData<RegisterData<RegisterPoolMapping>>,
}
#[derive(Debug)]
/// A coherent buffer that needs a host->device flush before command execution.
struct CoherentBufferFlushRange {
    device_buffer: *mut d3d11::ID3D11Buffer,
    host_ptr: *mut u8,
    range: SyncRange,
}
#[derive(Debug)]
/// A coherent buffer that needs a device->host readback after execution.
struct CoherentBufferInvalidateRange {
    device_buffer: *mut d3d11::ID3D11Buffer,
    host_ptr: *mut u8,
    range: Range<u64>,
}
#[derive(Debug)]
struct CoherentBuffers {
    // descriptor set writes containing coherent resources go into these vecs and are added to the
    // command buffers own Vec on binding the set.
    flush_coherent_buffers: RefCell<Vec<CoherentBufferFlushRange>>,
    invalidate_coherent_buffers: RefCell<Vec<CoherentBufferInvalidateRange>>,
}
impl CoherentBuffers {
    /// Replaces the flush entry keyed by the `old` device buffer with one for
    /// `buffer` (whole-range), or appends it if `old` wasn't tracked.
    /// No-op when the buffer hasn't actually changed.
    fn _add_flush(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) {
        let new = buffer.internal.raw;
        if old != new {
            let mut buffers = self.flush_coherent_buffers.borrow_mut();
            let pos = buffers.iter().position(|sync| old == sync.device_buffer);
            let sync_range = CoherentBufferFlushRange {
                device_buffer: new,
                host_ptr: buffer.host_ptr,
                range: SyncRange::Whole,
            };
            if let Some(pos) = pos {
                buffers[pos] = sync_range;
            } else {
                buffers.push(sync_range);
            }
            // The disjoint constant-buffer mirror must be flushed as well.
            if let Some(disjoint) = buffer.internal.disjoint_cb {
                let pos = buffers
                    .iter()
                    .position(|sync| disjoint == sync.device_buffer);
                let sync_range = CoherentBufferFlushRange {
                    device_buffer: disjoint,
                    host_ptr: buffer.host_ptr,
                    range: SyncRange::Whole,
                };
                if let Some(pos) = pos {
                    buffers[pos] = sync_range;
                } else {
                    buffers.push(sync_range);
                }
            }
        }
    }
    /// Same as `_add_flush` but for device->host invalidation; tracks the
    /// buffer's bound range rather than the whole resource.
    fn _add_invalidate(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) {
        let new = buffer.internal.raw;
        if old != new {
            let mut buffers = self.invalidate_coherent_buffers.borrow_mut();
            let pos = buffers.iter().position(|sync| old == sync.device_buffer);
            let sync_range = CoherentBufferInvalidateRange {
                device_buffer: new,
                host_ptr: buffer.host_ptr,
                range: buffer.bound_range.clone(),
            };
            if let Some(pos) = pos {
                buffers[pos] = sync_range;
            } else {
                buffers.push(sync_range);
            }
        }
    }
}
/// Newtype around a common interface that all bindable resources inherit from.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
struct Descriptor(*mut d3d11::ID3D11DeviceChild);
bitflags! {
    /// A set of D3D11 descriptor types that need to be associated
    /// with a single gfx-hal `DescriptorType`.
    #[derive(Default)]
    pub struct DescriptorContent: u8 {
        const CBV = 0x1;
        const SRV = 0x2;
        const UAV = 0x4;
        const SAMPLER = 0x8;
        /// Indicates if the descriptor is a dynamic uniform/storage buffer.
        /// Important as dynamic buffers are implemented as root descriptors.
        const DYNAMIC = 0x10;
    }
}
impl From<pso::DescriptorType> for DescriptorContent {
    /// Maps a gfx-hal descriptor type to the set of D3D11 views it occupies.
    /// Storage images/buffers take both an SRV and a UAV slot; sampled images
    /// with a combined sampler take an SRV plus a sampler slot.
    fn from(ty: pso::DescriptorType) -> Self {
        use hal::pso::{
            DescriptorType as Dt,
            BufferDescriptorFormat as Bdf,
            BufferDescriptorType as Bdt,
            ImageDescriptorType as Idt,
        };
        match ty {
            Dt::Sampler =>
                DescriptorContent::SAMPLER,
            Dt::Image { ty: Idt::Sampled { with_sampler: true } } =>
                DescriptorContent::SRV | DescriptorContent::SAMPLER,
            Dt::Image { ty: Idt::Sampled { with_sampler: false } } |
            Dt::InputAttachment =>
                DescriptorContent::SRV,
            Dt::Image { ty: Idt::Storage } =>
                DescriptorContent::SRV | DescriptorContent::UAV,
            Dt::Buffer { ty: Bdt::Uniform, format: Bdf::Structured { dynamic_offset: true } } =>
                DescriptorContent::CBV | DescriptorContent::DYNAMIC,
            Dt::Buffer { ty: Bdt::Uniform, .. } =>
                DescriptorContent::CBV,
            Dt::Buffer { ty: Bdt::Storage { read_only: true }, format: Bdf::Structured { dynamic_offset: true } } =>
                DescriptorContent::SRV | DescriptorContent::DYNAMIC,
            Dt::Buffer { ty: Bdt::Storage { read_only: false }, format: Bdf::Structured { dynamic_offset: true } } =>
                DescriptorContent::SRV | DescriptorContent::UAV | DescriptorContent::DYNAMIC,
            Dt::Buffer { ty: Bdt::Storage { read_only: true }, .. } =>
                DescriptorContent::SRV,
            Dt::Buffer { ty: Bdt::Storage { read_only: false }, .. } =>
                DescriptorContent::SRV | DescriptorContent::UAV,
        }
    }
}
/// A descriptor set: a window of `len` handles starting at `offset` inside its
/// pool's flat handle array, plus coherent-buffer bookkeeping and a copy of
/// the layout for locating bindings.
pub struct DescriptorSet {
    offset: DescriptorIndex,
    len: DescriptorIndex,
    // raw pointer into the owning pool's `handles` vec
    handles: *mut Descriptor,
    coherent_buffers: Mutex<CoherentBuffers>,
    layout: DescriptorSetLayout,
}
impl fmt::Debug for DescriptorSet {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("DescriptorSet")
    }
}
// NOTE(review): asserted despite the raw `handles` pointer — presumably the
// pool outlives its sets and access is externally synchronized; confirm.
unsafe impl Send for DescriptorSet {}
unsafe impl Sync for DescriptorSet {}
impl DescriptorSet {
    /// Records a coherent-buffer flush replacement if the device buffer changed.
    fn _add_flush(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) {
        let new = buffer.internal.raw;
        if old != new {
            self.coherent_buffers.lock()._add_flush(old, buffer);
        }
    }
    /// Records a coherent-buffer invalidate replacement if the device buffer changed.
    fn _add_invalidate(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) {
        let new = buffer.internal.raw;
        if old != new {
            self.coherent_buffers.lock()._add_invalidate(old, buffer);
        }
    }
    /// Writes `value` into the handle slot at `offset` (relative to this set).
    unsafe fn assign(&self, offset: DescriptorIndex, value: *mut d3d11::ID3D11DeviceChild) {
        *self.handles.offset(offset as isize) = Descriptor(value);
    }
    /// Writes `value` into the per-stage slots for every stage in `stages`.
    unsafe fn assign_stages(
        &self,
        offsets: &MultiStageData<DescriptorIndex>,
        stages: pso::ShaderStageFlags,
        value: *mut d3d11::ID3D11DeviceChild,
    ) {
        if stages.contains(pso::ShaderStageFlags::VERTEX) {
            self.assign(offsets.vs, value);
        }
        if stages.contains(pso::ShaderStageFlags::FRAGMENT) {
            self.assign(offsets.ps, value);
        }
        if stages.contains(pso::ShaderStageFlags::COMPUTE) {
            self.assign(offsets.cs, value);
        }
    }
}
#[derive(Debug)]
/// Descriptor pool: a flat array of view/sampler handles plus a range
/// allocator that hands out contiguous windows of it to sets.
pub struct DescriptorPool {
    handles: Vec<Descriptor>,
    allocator: RangeAllocator<DescriptorIndex>,
}
unsafe impl Send for DescriptorPool {}
unsafe impl Sync for DescriptorPool {}
impl DescriptorPool {
    /// Creates a pool with `size` null-initialized handle slots.
    fn with_capacity(size: DescriptorIndex) -> Self {
        DescriptorPool {
            handles: vec![Descriptor(ptr::null_mut()); size as usize],
            allocator: RangeAllocator::new(0 .. size),
        }
    }
}
impl pso::DescriptorPool<Backend> for DescriptorPool {
    /// Allocates a handle window sized from the layout's pool mapping
    /// (minimum 1 so even empty layouts get a valid, distinct range), clears
    /// the slots, and builds the set around a raw pointer into `handles`.
    unsafe fn allocate_set(
        &mut self,
        layout: &DescriptorSetLayout,
    ) -> Result<DescriptorSet, pso::AllocationError> {
        let len = layout.pool_mapping
            .map_register(|mapping| mapping.count as DescriptorIndex)
            .sum()
            .max(1);
        self.allocator
            .allocate_range(len)
            .map(|range| {
                // null out any stale handles left by a previously freed set
                for handle in &mut self.handles[range.start as usize .. range.end as usize] {
                    *handle = Descriptor(ptr::null_mut());
                }
                DescriptorSet {
                    offset: range.start,
                    len,
                    handles: self.handles.as_mut_ptr().offset(range.start as _),
                    coherent_buffers: Mutex::new(CoherentBuffers {
                        flush_coherent_buffers: RefCell::new(Vec::new()),
                        invalidate_coherent_buffers: RefCell::new(Vec::new()),
                    }),
                    layout: DescriptorSetLayout {
                        bindings: Arc::clone(&layout.bindings),
                        pool_mapping: layout.pool_mapping.clone(),
                    },
                }
            })
            .map_err(|_| pso::AllocationError::OutOfPoolMemory)
    }
    /// Returns each set's handle window to the allocator.
    unsafe fn free_sets<I>(&mut self, descriptor_sets: I)
    where
        I: IntoIterator<Item = DescriptorSet>,
    {
        for set in descriptor_sets {
            self.allocator
                .free_range(set.offset .. (set.offset + set.len))
        }
    }
    /// Frees all allocations at once.
    unsafe fn reset(&mut self) {
        self.allocator.reset();
    }
}
#[derive(Debug)]
/// CPU-side fence implemented as a mutex-protected "signaled" flag plus a
/// condvar to wake waiters.
pub struct RawFence {
    mutex: Mutex<bool>,
    condvar: Condvar,
}
/// Shared fence handle.
pub type Fence = Arc<RawFence>;
#[derive(Debug)]
/// Semaphores are a no-op on this backend; zero-sized marker.
pub struct Semaphore;
#[derive(Debug)]
/// Query pools are not implemented; zero-sized marker.
pub struct QueryPool;
/// Uninhabited marker type implementing `hal::Backend` for DX11.
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
pub enum Backend {}
// Wires all backend object types into the gfx-hal `Backend` trait.
impl hal::Backend for Backend {
    type Instance = Instance;
    type PhysicalDevice = PhysicalDevice;
    type Device = device::Device;
    type Surface = Surface;
    type Swapchain = Swapchain;
    type QueueFamily = QueueFamily;
    type CommandQueue = CommandQueue;
    type CommandBuffer = CommandBuffer;
    type Memory = Memory;
    type CommandPool = CommandPool;
    type ShaderModule = ShaderModule;
    type RenderPass = RenderPass;
    type Framebuffer = Framebuffer;
    type Buffer = Buffer;
    type BufferView = BufferView;
    type Image = Image;
    type ImageView = ImageView;
    type Sampler = Sampler;
    type ComputePipeline = ComputePipeline;
    type GraphicsPipeline = GraphicsPipeline;
    type PipelineLayout = PipelineLayout;
    type PipelineCache = ();
    type DescriptorSetLayout = DescriptorSetLayout;
    type DescriptorPool = DescriptorPool;
    type DescriptorSet = DescriptorSet;
    type Fence = Fence;
    type Semaphore = Semaphore;
    type Event = ();
    type QueryPool = QueryPool;
}
/// Asserts that a requested line width is the only value DX11 can honor (1.0).
fn validate_line_width(width: f32) {
    // Note from the Vulkan spec:
    // > If the wide lines feature is not enabled, lineWidth must be 1.0
    // Simply assert and no-op because DX11 never exposes `Features::LINE_WIDTH`
    assert_eq!(width, 1.0);
}
| 32.412745 | 132 | 0.547401 |
e24db3696062d356d10198839400a9fd207582bc | 4,488 | use pyo3::prelude::*;
use pyo3::types::IntoPyDict;
use pyo3::types::{PyDict, PyTuple};
use pyo3::{py_run, wrap_pyfunction, AsPyRef, PyClassShell};
mod common;
/// Test class exercising `&mut`-typed Python method arguments:
/// `set_other` mutates a *different* instance passed in from Python.
#[pyclass]
struct MutRefArg {
    n: i32,
}
#[pymethods]
impl MutRefArg {
    /// Returns the stored counter value.
    fn get(&self) -> PyResult<i32> {
        Ok(self.n)
    }
    /// Sets the other instance's counter to 100, proving that pyo3 hands us
    /// a usable `&mut` borrow of a second pyclass argument.
    fn set_other(&self, other: &mut MutRefArg) -> PyResult<()> {
        other.n = 100;
        Ok(())
    }
}
// Calls `set_other` from Python and verifies the mutation is visible from Rust.
#[test]
fn mut_ref_arg() {
    let gil = Python::acquire_gil();
    let py = gil.python();
    let inst1 = Py::new(py, MutRefArg { n: 0 }).unwrap();
    let inst2 = Py::new(py, MutRefArg { n: 0 }).unwrap();
    // Expose both instances to the Python snippet under the names inst1/inst2.
    let d = [("inst1", &inst1), ("inst2", &inst2)].into_py_dict(py);
    py.run("inst1.set_other(inst2)", None, Some(d)).unwrap();
    // inst2 was mutated through the &mut argument.
    assert_eq!(inst2.as_ref(py).n, 100);
}
/// Minimal pyclass with a single read-only (`get`) attribute.
#[pyclass]
struct PyUsize {
    #[pyo3(get)]
    pub value: usize,
}
/// `#[pyfunction]` returning a custom pyclass by value.
#[pyfunction]
fn get_zero() -> PyResult<PyUsize> {
    Ok(PyUsize { value: 0 })
}
#[test]
/// Checks that a function returning a custom class is usable
/// both from Rust and from Python.
fn return_custom_class() {
    let gil = Python::acquire_gil();
    let py = gil.python();
    // Using from rust
    assert_eq!(get_zero().unwrap().value, 0);
    // Using from python (shadow the Rust fn with the wrapped PyObject)
    let get_zero = wrap_pyfunction!(get_zero)(py);
    py_assert!(py, get_zero, "get_zero().value == 0");
}
// A Rust tuple of primitives converts to a Python tuple element-by-element.
#[test]
fn intopytuple_primitive() {
    let gil = Python::acquire_gil();
    let py = gil.python();
    let tup = (1, 2, "foo");
    py_assert!(py, tup, "tup == (1, 2, 'foo')");
    py_assert!(py, tup, "tup[0] == 1");
    py_assert!(py, tup, "tup[1] == 2");
    py_assert!(py, tup, "tup[2] == 'foo'");
}
/// Field-less pyclass used as a distinct-identity payload in tuple tests.
#[pyclass]
struct SimplePyClass {}
// A Rust tuple of pyclass references converts to a Python tuple whose
// elements keep their class and are distinct objects.
#[test]
fn intopytuple_pyclass() {
    let gil = Python::acquire_gil();
    let py = gil.python();
    let tup = (
        PyClassShell::new_ref(py, SimplePyClass {}).unwrap(),
        PyClassShell::new_ref(py, SimplePyClass {}).unwrap(),
    );
    py_assert!(py, tup, "type(tup[0]).__name__ == 'SimplePyClass'");
    py_assert!(py, tup, "type(tup[0]).__name__ == type(tup[1]).__name__");
    py_assert!(py, tup, "tup[0] != tup[1]");
}
// PyTuple::new collects an iterator of primitives into a Python tuple.
#[test]
fn pytuple_primitive_iter() {
    let gil = Python::acquire_gil();
    let py = gil.python();
    let tup = PyTuple::new(py, [1u32, 2, 3].iter());
    py_assert!(py, tup, "tup == (1, 2, 3)");
}
// PyTuple::new collects an iterator of pyclass references into a Python tuple.
// Mirrors `intopytuple_pyclass` above, but going through the iterator constructor.
#[test]
fn pytuple_pyclass_iter() {
    let gil = Python::acquire_gil();
    let py = gil.python();
    let tup = PyTuple::new(
        py,
        [
            PyClassShell::new_ref(py, SimplePyClass {}).unwrap(),
            PyClassShell::new_ref(py, SimplePyClass {}).unwrap(),
        ]
        .iter(),
    );
    py_assert!(py, tup, "type(tup[0]).__name__ == 'SimplePyClass'");
    // Bug fix: this previously compared tup[0] with itself (a tautology);
    // compare the two elements' classes, as intopytuple_pyclass does.
    py_assert!(py, tup, "type(tup[0]).__name__ == type(tup[1]).__name__");
    py_assert!(py, tup, "tup[0] != tup[1]");
}
/// pyclass with an instance `__dict__` and an explicit module path so that
/// `pickle` can re-import it by qualified name.
#[pyclass(dict, module = "test_module")]
struct PickleSupport {}
#[pymethods]
impl PickleSupport {
    #[new]
    fn new() -> PickleSupport {
        PickleSupport {}
    }
    /// Pickle protocol hook: returns (class, empty ctor args, instance __dict__),
    /// so unpickling calls the class with no args and restores the dict.
    pub fn __reduce__<'py>(
        slf: &'py PyClassShell<Self>,
        py: Python<'py>,
    ) -> PyResult<(PyObject, &'py PyTuple, PyObject)> {
        let cls = slf.to_object(py).getattr(py, "__class__")?;
        let dict = slf.to_object(py).getattr(py, "__dict__")?;
        Ok((cls, PyTuple::empty(py), dict))
    }
}
// Registers `module` in `sys.modules` under its own name so Python-side
// `import`/pickle lookups can find it.
fn add_module(py: Python, module: &PyModule) -> PyResult<()> {
    py.import("sys")?
        .dict()
        .get_item("modules")
        .unwrap()
        .downcast_mut::<PyDict>()?
        .set_item(module.name()?, module)
}
// Round-trips a PickleSupport instance through pickle.dumps/loads inside
// Python and checks the instance __dict__ survives.
#[test]
fn test_pickle() {
    let gil = Python::acquire_gil();
    let py = gil.python();
    // The class must be importable as test_module.PickleSupport for pickle to work.
    let module = PyModule::new(py, "test_module").unwrap();
    module.add_class::<PickleSupport>().unwrap();
    add_module(py, module).unwrap();
    let inst = PyClassShell::new_ref(py, PickleSupport {}).unwrap();
    py_run!(
        py,
        inst,
        r#"
inst.a = 1
assert inst.__dict__ == {'a': 1}
import pickle
inst2 = pickle.loads(pickle.dumps(inst))
assert inst2.__dict__ == {'a': 1}
"#
    );
}
#[test]
fn incorrect_iter() {
let gil = Python::acquire_gil();
let py = gil.python();
let int = 13isize.to_object(py);
let int_ref = int.as_ref(py);
// Should not segfault.
assert!(int_ref.iter().is_err());
assert!(py
.eval("print('Exception state should not be set.')", None, None)
.is_ok());
}
| 24.52459 | 94 | 0.573529 |
c1c334b0a945ca3022bd527a2fa24519e166d0e8 | 843 | pub mod handlers_arc;
// <data>
use actix_web::{web, App, HttpServer, Responder};
use std::cell::Cell;
/// Shared request-handler state holding a simple visit counter.
/// NOTE(review): `Cell` provides single-threaded interior mutability only;
/// this presumably relies on each actix worker owning its own clone — confirm
/// against `handlers_arc` for the thread-shared variant.
#[derive(Clone)]
struct AppState {
    count: Cell<usize>,
}
/// GET handler: renders the current counter value.
async fn show_count(data: web::Data<AppState>) -> impl Responder {
    format!("count: {}", data.count.get())
}
/// Handler that increments the counter, then renders the new value.
async fn add_one(data: web::Data<AppState>) -> impl Responder {
    let count = data.count.get();
    data.count.set(count + 1);
    format!("count: {}", data.count.get())
}
// Serves "/" (read counter) and "/add" (increment) on 127.0.0.1:8080.
#[actix_web::main]
async fn main() -> std::io::Result<()> {
    let data = AppState {
        count: Cell::new(0),
    };
    // NOTE(review): the factory closure clones `data` each time it runs, so
    // the counter is presumably per-worker rather than global — verify this
    // is the intended demonstration (cf. the handlers_arc module).
    HttpServer::new(move || {
        App::new()
            .app_data(web::Data::new(data.clone()))
            .route("/", web::to(show_count))
            .route("/add", web::to(add_one))
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}
// </data>
| 21.615385 | 66 | 0.557533 |
9c60305242bda19bc4102e223cfd62f48a0677f7 | 7,509 | use crate::lang::execution_context::{ExecutionContext, This, ArgumentVector};
use crate::lang::errors::{CrushResult, argument_error};
use crate::lang::{value::ValueType, list::List, command::CrushCommand};
use crate::lang::value::Value;
use std::collections::HashSet;
use std::collections::HashMap;
use lazy_static::lazy_static;
use crate::lang::command::TypeMap;
/// Builds the fully-qualified command path for a list method:
/// `["global", "types", "list", <name>]`.
fn full(name: &'static str) -> Vec<&'static str> {
    let mut path = vec!["global", "types", "list"];
    path.push(name);
    path
}
// Registry of every method exposed on the `list` type. Each `declare` entry
// supplies: qualified path, handler fn, "can block" flag, usage signature,
// one-line help text, and optional long-form help.
// Fix: corrected the user-visible typo "elments" -> "elements" in `clear`'s help.
lazy_static! {
    pub static ref METHODS: HashMap<String, Box<dyn CrushCommand + Sync + Send>> = {
        let mut res: HashMap<String, Box<dyn CrushCommand + Send + Sync>> = HashMap::new();
        res.declare(full("len"),
                    len, false,
                    "list:len",
                    "The number of elements in the list",
                    None);
        res.declare(full("empty"),
                    empty, false,
                    "list:empty",
                    "True if there are no elements in the list",
                    None);
        res.declare(full("push"),
                    push, false,
                    "list:push", "Push an element to the end of the list", None);
        res.declare(full("pop"),
                    pop, false,
                    "list:pop", "Remove the last element from the list", None);
        res.declare(full("peek"),
                    peek, false,
                    "list:peek", "Return the last element from the list", None);
        res.declare(full("clear"),
                    clear, false,
                    "list:clear", "Remove all elements from the list", None);
        res.declare(full("__setitem__"),
                    setitem, false,
                    "list[idx:integer] = value:any", "Assign a new value to the element at the specified index", None);
        res.declare(full("remove"),
                    remove, false,
                    "list:remove idx:integer", "Remove the element at the specified index", None);
        res.declare(full("insert"),
                    insert, false,
                    "list:insert idx:integer value:any", "Insert a new element at the specified index", None);
        res.declare(full("truncate"),
                    truncate, false,
                    "list:truncate idx:integer", "Remove all elements past the specified index", None);
        res.declare(full("clone"),
                    clone, false,
                    "list:clone", "Create a duplicate of the list", None);
        res.declare(full("of"),
                    of, false,
                    "list:of element:any...",
                    "Create a new list containing the supplied elements",
                    None);
        res.declare(full("new"),
                    new, false,
                    "list:new", "Create a new list with the specified element type",
                    Some(r#"    Example:
    l := ((list string):new)"#));
        res.declare(full("__call_type__"),
                    call_type, false,
                    "list element_type:type", "Return a list type for the specified element type",
                    Some(r#"    Example:
    # This command returns the type 'list of integers':
    list integer"#));
        res.declare(full("__getitem__"),
                    getitem, true,
                    "name[idx:index]",
                    "Return a file or subdirectory in the specified base directory",
                    None);
        res
    };
}
// `list element_type` — parameterizes the list type.
// `this` must already be a list type; if its element type is still Empty, the
// single argument supplies it, otherwise re-parameterization is rejected.
fn call_type(mut context: ExecutionContext) -> CrushResult<()> {
    match context.this.r#type()? {
        ValueType::List(c) => {
            match *c {
                ValueType::Empty => {
                    // Unparameterized list: take exactly one type argument.
                    context.arguments.check_len(1)?;
                    context.output.send(Value::Type(ValueType::List(Box::new(context.arguments.r#type(0)?))))
                }
                c => {
                    // Already parameterized: only valid with no arguments
                    // (returns the existing type unchanged).
                    if context.arguments.is_empty() {
                        context.output.send(Value::Type(ValueType::List(Box::from(c))))
                    } else {
                        argument_error(format!("Tried to set subtype on a list that already has the subtype {}", c.to_string()).as_str())
                    }
                }
            }
        }
        _ => argument_error("Invalid this, expected type list"),
    }
}
// `list:of e1 e2 ...` — builds a list from the supplied elements.
// Element type is the common type if all arguments share one, else Any.
fn of(mut context: ExecutionContext) -> CrushResult<()> {
    context.arguments.check_len_min(1)?;
    // Collect the distinct argument types to decide the list's element type.
    let types = context.arguments.iter().map(|a| a.value.value_type()).collect::<HashSet<ValueType>>();
    let lst = List::new(
        if types.len() == 1 {
            context.arguments[0].value.value_type()
        } else {
            ValueType::Any
        },
        context.arguments.drain(..).map(|a| a.value).collect());
    context.output.send(Value::List(lst))
}
// `list:new` — creates an empty list; `this` must be a (parameterized) list type.
fn new(context: ExecutionContext) -> CrushResult<()> {
    context.arguments.check_len(0)?;
    match context.this.r#type()? {
        ValueType::List(t) => context.output.send(Value::List(List::new(*t, vec![]))),
        _ => argument_error("Expected this to be a list type"),
    }
}
// `list:len` — number of elements.
fn len(context: ExecutionContext) -> CrushResult<()> {
    context.arguments.check_len(0)?;
    context.output.send(Value::Integer(context.this.list()?.len() as i128))
}
// `list:empty` — true iff the list has no elements.
fn empty(context: ExecutionContext) -> CrushResult<()> {
    context.arguments.check_len(0)?;
    context.output.send(Value::Bool(context.this.list()?.len() == 0))
}
// `list:push e...` — appends elements; every element must match the list's
// element type (or the list must be `Any`). Validates all before appending.
fn push(mut context: ExecutionContext) -> CrushResult<()> {
    let l = context.this.list()?;
    let mut new_elements: Vec<Value> = Vec::new();
    for el in context.arguments.drain(..) {
        if el.value.value_type() == l.element_type() || l.element_type() == ValueType::Any {
            new_elements.push(el.value)
        } else {
            return argument_error("Invalid element type");
        }
    }
    if !new_elements.is_empty() {
        l.append(&mut new_elements)?;
    }
    // Echo the (mutated) list back so pushes can be chained.
    context.output.send(Value::List(l))
}
// `list:pop` — removes and emits the last element; emits nothing when empty.
// Fix: the original discarded the Result of `o.send(c)` inside `.map(...)`,
// silently swallowing output errors; they are now propagated with `?`.
fn pop(context: ExecutionContext) -> CrushResult<()> {
    context.arguments.check_len(0)?;
    let o = context.output;
    if let Some(value) = context.this.list()?.pop() {
        o.send(value)?;
    }
    Ok(())
}
// `list:peek` — emits the last element without removing it; nothing when empty.
// Same fix as `pop`: send errors are propagated instead of dropped.
fn peek(context: ExecutionContext) -> CrushResult<()> {
    context.arguments.check_len(0)?;
    let o = context.output;
    if let Some(value) = context.this.list()?.peek() {
        o.send(value)?;
    }
    Ok(())
}
// `list:clear` — removes all elements in place.
fn clear(context: ExecutionContext) -> CrushResult<()> {
    context.arguments.check_len(0)?;
    context.this.list()?.clear();
    Ok(())
}
// `list[idx] = value` — replaces the element at `idx`.
fn setitem(mut context: ExecutionContext) -> CrushResult<()> {
    context.arguments.check_len(2)?;
    let list = context.this.list()?;
    let key = context.arguments.integer(0)?;
    let value = context.arguments.value(1)?;
    list.set(key as usize, value)
}
// `list:remove idx` — deletes the element at `idx`.
fn remove(mut context: ExecutionContext) -> CrushResult<()> {
    context.arguments.check_len(1)?;
    let list = context.this.list()?;
    let idx = context.arguments.integer(0)?;
    list.remove(idx as usize)
}
// `list:insert idx value` — inserts `value` before position `idx`.
fn insert(mut context: ExecutionContext) -> CrushResult<()> {
    context.arguments.check_len(2)?;
    let list = context.this.list()?;
    let idx = context.arguments.integer(0)?;
    let value = context.arguments.value(1)?;
    list.insert(idx as usize, value)
}
// `list:truncate idx` — drops every element at or past `idx`.
fn truncate(mut context: ExecutionContext) -> CrushResult<()> {
    context.arguments.check_len(1)?;
    let list = context.this.list()?;
    let idx = context.arguments.integer(0)?;
    list.truncate(idx as usize);
    Ok(())
}
// `list:clone` — emits a shallow copy of the list.
fn clone(context: ExecutionContext) -> CrushResult<()> {
    context.arguments.check_len(0)?;
    context.output.send(Value::List(context.this.list()?.copy()))
}
// `list[idx]` — emits the element at `idx` (errors handled by `get`).
fn getitem(mut context: ExecutionContext) -> CrushResult<()> {
    context.arguments.check_len(1)?;
    let list = context.this.list()?;
    let idx = context.arguments.integer(0)?;
    context.output.send(list.get(idx as usize)?)
}
| 35.419811 | 137 | 0.589692 |
1e9e9b0f98820637aeb00805aafba1ecb855614f | 1,060 | #[macro_use]
extern crate criterion;
extern crate criterion_stats as stats;
extern crate rand;
mod common_bench;
use criterion::Criterion;
// Generates a benchmark module per numeric type ($ty = f32/f64):
// each module bootstraps the mean of a 100-element sample 100_000 times.
macro_rules! bench {
    ($ty:ident) => {
        pub mod $ty {
            use criterion::Criterion;
            use stats::univariate::Sample;
            const NRESAMPLES: usize = 100_000;
            const SAMPLE_SIZE: usize = 100;
            pub fn mean(c: &mut Criterion) {
                // Random sample generated once, outside the timed closure.
                let v = ::common_bench::vec_sized::<$ty>(SAMPLE_SIZE).unwrap();
                c.bench_function(
                    &format!("univariate_bootstrap_mean_{}", stringify!($ty)),
                    move |b| {
                        let sample = Sample::new(&v);
                        b.iter(|| sample.bootstrap(NRESAMPLES, |s| (s.mean(),)))
                    },
                );
            }
        }
    };
}
// Instantiate the benchmark for both float widths and register with criterion.
mod bench {
    bench!(f32);
    bench!(f64);
}
criterion_group!(
    name = benches;
    config = Criterion::default();
    targets = bench::f32::mean, bench::f64::mean);
| 24.090909 | 80 | 0.516981 |
8a105662ef534bdeabfd4baf64d83ac70dee90a5 | 549 | mod babel;
mod color_checker;
mod load_data;
// Tolerance for float comparisons; presumably consumed by the run_* helpers
// in the babel/color_checker modules — not referenced in this file directly.
const MAX_ERROR: f64 = 0.000000000001;
// Thin #[test] wrappers so each dataset/source-space combination shows up as
// its own test case; the actual assertions live in the helper modules.
#[test]
pub fn babel_from_yxy() {
    babel::run_from_yxy_tests();
}
#[test]
pub fn babel_from_xyz() {
    babel::run_from_xyz_tests();
}
#[test]
pub fn babel_from_lab() {
    babel::run_from_lab_tests();
}
#[test]
pub fn color_checker_from_yxy() {
    color_checker::run_from_yxy_tests();
}
#[test]
pub fn color_checker_from_xyz() {
    color_checker::run_from_xyz_tests();
}
#[test]
pub fn color_checker_from_lab() {
    color_checker::run_from_lab_tests();
}
| 17.15625 | 40 | 0.710383 |
56f9454644ff5e33e0663c43b0a87b0a14edaf39 | 44 | mod delete;
pub use delete::DeleteCommand;
| 11 | 30 | 0.772727 |
9c19d7dc394b98b2c1f84d49837471dd81e89c95 | 891 | ///
/// Like `From` and `Into`, `TryFrom` and `TryInto` are traits used for converting between types.
/// Unlike `From` and `Into`, `TryFrom`/`TryInto` are used for conversions that can fail, and therefore return a `Result`.
///
use std::convert::TryFrom;
#[derive(Debug, PartialEq)]
struct Number(i32);

/// Fallible conversion: only even integers form a valid `Number`;
/// odd inputs yield `Err(())`.
impl TryFrom<i32> for Number {
    type Error = ();

    fn try_from(value: i32) -> Result<Self, Self::Error> {
        match value % 2 {
            0 => Ok(Number(value)),
            _ => Err(()),
        }
    }
}
// Exercises both directions: explicit `Number::try_from(x)` and the
// blanket `x.try_into()` that `TryFrom` provides for free.
#[test]
fn main(){
    use std::convert::TryInto;
    // TryFrom: even succeeds, odd fails.
    assert_eq!(Number::try_from(8), Ok(Number(8)));
    assert_eq!(Number::try_from(5), Err(()));
    // TryInto: the target type is inferred from the annotation.
    let result : Result<Number, ()> = 8i32.try_into();
    assert_eq!(result, Ok(Number(8)));
    println!("TypeInto result : {:?}",result);
    let result_two : Result<Number, ()> = 5i32.try_into();
    assert_eq!(result_two, Err(()));
    println!("TypeInto result_two :{:?}",result_two);
} | 23.447368 | 58 | 0.59147 |
e882de9eb4681a442f3652254b2312ddcbad8991 | 440 | #[derive(Debug)]
pub struct Line {
    pub patterns: Vec<String>,
    pub outputs: Vec<String>,
}
impl Line {
    /// Counts the entries whose length is 2, 3, 4 or 7.
    ///
    /// NOTE(review): these lengths look like the uniquely-identifiable
    /// seven-segment digits (1, 7, 4, 8) from Advent of Code 2021 day 8 —
    /// confirm against the callers.
    ///
    /// Improvement: takes `&[String]` instead of `&Vec<String>` (existing
    /// `&vec` call sites still coerce), and replaces the manual counter
    /// loop with a filter/count over a `matches!` pattern.
    pub fn num_unique_output(data: &[String]) -> i64 {
        data.iter()
            .filter(|datum| matches!(datum.len(), 2 | 3 | 4 | 7))
            .count() as i64
    }
}
| 20 | 57 | 0.395455 |
565904fb57845ea7375d1742c35b4d06f9f17571 | 265 | use alloc::{borrow::ToOwned, vec};
use crate::{
asts::bobo::{Instruction::*, Load::*, Reg},
generate,
};
// Generating a single `MVI A,0` load instruction emits exactly one
// assembly line, newline-terminated.
#[test]
fn zero() {
    assert_eq!(
        generate(vec! {
            Load(Mvi(Reg::A, 0)),
        }),
        Ok("MVI A,0\n".to_owned())
    );
}
| 15.588235 | 47 | 0.467925 |
56ea99d89945b5cb373d88982b5720d2b0e35acf | 36,235 | use std::collections::HashMap;
use serde::de::Error as DeError;
use serde::{Deserialize, Deserializer};
use super::prelude::*;
use crate::builder::{
CreateApplicationCommand,
CreateApplicationCommands,
CreateInteractionResponse,
CreateInteractionResponseFollowup,
EditInteractionResponse,
};
use crate::http::Http;
use crate::internal::prelude::{JsonMap, StdResult, Value};
use crate::model::channel::{ChannelType, PartialChannel};
use crate::model::guild::{Member, PartialMember, Role};
use crate::model::id::{
ApplicationId,
ChannelId,
CommandId,
GuildId,
InteractionId,
RoleId,
UserId,
};
use crate::model::interactions::InteractionType;
use crate::model::prelude::User;
use crate::model::utils::{
deserialize_channels_map,
deserialize_messages_map,
deserialize_options,
deserialize_options_with_resolved,
deserialize_partial_members_map,
deserialize_roles_map,
deserialize_users,
};
use crate::utils;
/// An interaction when a user invokes a slash command.
#[derive(Clone, Debug, Serialize)]
#[non_exhaustive]
pub struct ApplicationCommandInteraction {
    /// Id of the interaction.
    pub id: InteractionId,
    /// Id of the application this interaction is for.
    pub application_id: ApplicationId,
    /// The type of interaction.
    #[serde(rename = "type")]
    pub kind: InteractionType,
    /// The data of the interaction which was triggered.
    pub data: ApplicationCommandInteractionData,
    /// The guild Id this interaction was sent from, if there is one.
    /// `None` for interactions triggered in DMs.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub guild_id: Option<GuildId>,
    /// The channel Id this interaction was sent from.
    pub channel_id: ChannelId,
    /// The `member` data for the invoking user.
    ///
    /// **Note**: It is only present if the interaction is triggered in a guild.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub member: Option<Member>,
    /// The `user` object for the invoking user.
    /// (Filled from `member.user` during deserialization when absent.)
    pub user: User,
    /// A continuation token for responding to the interaction.
    pub token: String,
    /// Always `1`.
    pub version: u8,
}
impl ApplicationCommandInteraction {
    /// Gets the initial interaction response previously created for this
    /// interaction's token.
    ///
    /// # Errors
    ///
    /// Returns an [`Error::Http`] if there is no interaction response.
    ///
    /// [`Error::Http`]: crate::error::Error::Http
    pub async fn get_interaction_response(&self, http: impl AsRef<Http>) -> Result<Message> {
        http.as_ref().get_original_interaction_response(&self.token).await
    }
    /// Creates a response to the interaction received.
    ///
    /// **Note**: Message contents must be under 2000 unicode code points.
    ///
    /// # Errors
    ///
    /// Returns an [`Error::Model`] if the message content is too long.
    /// May also return an [`Error::Http`] if the API returns an error,
    /// or an [`Error::Json`] if there is an error in deserializing the
    /// API response.
    ///
    /// [`Error::Model`]: crate::error::Error::Model
    /// [`Error::Http`]: crate::error::Error::Http
    /// [`Error::Json`]: crate::error::Error::Json
    pub async fn create_interaction_response<F>(&self, http: impl AsRef<Http>, f: F) -> Result<()>
    where
        F: FnOnce(&mut CreateInteractionResponse) -> &mut CreateInteractionResponse,
    {
        let mut interaction_response = CreateInteractionResponse::default();
        f(&mut interaction_response);
        let map = utils::hashmap_to_json_map(interaction_response.0);
        // Validate limits locally before spending an HTTP round trip.
        Message::check_content_length(&map)?;
        Message::check_embed_length(&map)?;
        http.as_ref().create_interaction_response(self.id.0, &self.token, &Value::Object(map)).await
    }
/// Edits the initial interaction response.
///
/// `application_id` will usually be the bot's [`UserId`], except in cases of bots being very old.
///
/// Refer to Discord's docs for Edit Webhook Message for field information.
///
/// **Note**: Message contents must be under 2000 unicode code points, does not work on ephemeral messages.
///
/// [`UserId`]: crate::model::id::UserId
///
/// # Errors
///
/// Returns [`Error::Model`] if the edited content is too long.
/// May also return [`Error::Http`] if the API returns an error,
/// or an [`Error::Json`] if there is an error deserializing the response.
///
/// [`Error::Model`]: crate::error::Error::Model
/// [`Error::Http`]: crate::error::Error::Http
/// [`Error::Json`]: crate::error::Error::Json
pub async fn edit_original_interaction_response<F>(
&self,
http: impl AsRef<Http>,
f: F,
) -> Result<Message>
where
F: FnOnce(&mut EditInteractionResponse) -> &mut EditInteractionResponse,
{
let mut interaction_response = EditInteractionResponse::default();
f(&mut interaction_response);
let map = utils::hashmap_to_json_map(interaction_response.0);
Message::check_content_length(&map)?;
Message::check_embed_length(&map)?;
http.as_ref().edit_original_interaction_response(&self.token, &Value::Object(map)).await
}
    /// Deletes the initial interaction response.
    ///
    /// # Errors
    ///
    /// May return [`Error::Http`] if the API returns an error.
    /// Such as if the response was already deleted.
    ///
    /// [`Error::Http`]: crate::error::Error::Http
    pub async fn delete_original_interaction_response(&self, http: impl AsRef<Http>) -> Result<()> {
        http.as_ref().delete_original_interaction_response(&self.token).await
    }
/// Creates a followup response to the response sent.
///
/// **Note**: Message contents must be under 2000 unicode code points.
///
/// # Errors
///
/// Will return [`Error::Model`] if the content is too long.
/// May also return [`Error::Http`] if the API returns an error,
/// or a [`Error::Json`] if there is an error in deserializing the response.
///
/// [`Error::Model`]: crate::error::Error::Model
/// [`Error::Http`]: crate::error::Error::Http
/// [`Error::Json`]: crate::error::Error::Json
pub async fn create_followup_message<'a, F>(
&self,
http: impl AsRef<Http>,
f: F,
) -> Result<Message>
where
for<'b> F: FnOnce(
&'b mut CreateInteractionResponseFollowup<'a>,
) -> &'b mut CreateInteractionResponseFollowup<'a>,
{
let mut interaction_response = CreateInteractionResponseFollowup::default();
f(&mut interaction_response);
let map = utils::hashmap_to_json_map(interaction_response.0);
Message::check_content_length(&map)?;
Message::check_embed_length(&map)?;
http.as_ref().create_followup_message(&self.token, &Value::Object(map)).await
}
/// Edits a followup response to the response sent.
///
/// **Note**: Message contents must be under 2000 unicode code points.
///
/// # Errors
///
/// Will return [`Error::Model`] if the content is too long.
/// May also return [`Error::Http`] if the API returns an error,
/// or a [`Error::Json`] if there is an error in deserializing the response.
///
/// [`Error::Model`]: crate::error::Error::Model
/// [`Error::Http`]: crate::error::Error::Http
/// [`Error::Json`]: crate::error::Error::Json
pub async fn edit_followup_message<'a, F, M: Into<MessageId>>(
&self,
http: impl AsRef<Http>,
message_id: M,
f: F,
) -> Result<Message>
where
for<'b> F: FnOnce(
&'b mut CreateInteractionResponseFollowup<'a>,
) -> &'b mut CreateInteractionResponseFollowup<'a>,
{
let mut interaction_response = CreateInteractionResponseFollowup::default();
f(&mut interaction_response);
let map = utils::hashmap_to_json_map(interaction_response.0);
Message::check_content_length(&map)?;
Message::check_embed_length(&map)?;
http.as_ref()
.edit_followup_message(&self.token, message_id.into().into(), &Value::Object(map))
.await
}
    /// Deletes a previously-sent followup message by its id.
    ///
    /// # Errors
    ///
    /// May return [`Error::Http`] if the API returns an error.
    /// Such as if the response was already deleted.
    ///
    /// [`Error::Http`]: crate::error::Error::Http
    pub async fn delete_followup_message<M: Into<MessageId>>(
        &self,
        http: impl AsRef<Http>,
        message_id: M,
    ) -> Result<()> {
        http.as_ref().delete_followup_message(&self.token, message_id.into().into()).await
    }
    /// Helper function to defer an interaction: acknowledges it with a
    /// deferred channel-message response so a full reply can follow later.
    ///
    /// # Errors
    ///
    /// May return an [`Error::Http`] if the API returns an error,
    /// or an [`Error::Json`] if there is an error in deserializing the
    /// API response.
    ///
    /// [`Error::Http`]: crate::error::Error::Http
    /// [`Error::Json`]: crate::error::Error::Json
    pub async fn defer(&self, http: impl AsRef<Http>) -> Result<()> {
        self.create_interaction_response(http, |f| {
            f.kind(InteractionResponseType::DeferredChannelMessageWithSource)
        })
        .await
    }
}
impl<'de> Deserialize<'de> for ApplicationCommandInteraction {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> StdResult<Self, D::Error> {
let mut map = JsonMap::deserialize(deserializer)?;
let id = map.get("guild_id").and_then(|x| x.as_str()).and_then(|x| x.parse::<u64>().ok());
if let Some(guild_id) = id {
if let Some(member) = map.get_mut("member").and_then(|x| x.as_object_mut()) {
member.insert("guild_id".to_string(), Value::Number(Number::from(guild_id)));
}
if let Some(data) = map.get_mut("data") {
if let Some(resolved) = data.get_mut("resolved") {
if let Some(roles) = resolved.get_mut("roles") {
if let Some(values) = roles.as_object_mut() {
for value in values.values_mut() {
value.as_object_mut().expect("couldn't deserialize").insert(
"guild_id".to_string(),
Value::String(guild_id.to_string()),
);
}
}
}
if let Some(channels) = resolved.get_mut("channels") {
if let Some(values) = channels.as_object_mut() {
for value in values.values_mut() {
value
.as_object_mut()
.expect("couldn't deserialize application command")
.insert(
"guild_id".to_string(),
Value::String(guild_id.to_string()),
);
}
}
}
}
}
}
let id = map
.remove("id")
.ok_or_else(|| DeError::custom("expected id"))
.and_then(InteractionId::deserialize)
.map_err(DeError::custom)?;
let application_id = map
.remove("application_id")
.ok_or_else(|| DeError::custom("expected application id"))
.and_then(ApplicationId::deserialize)
.map_err(DeError::custom)?;
let kind = map
.remove("type")
.ok_or_else(|| DeError::custom("expected type"))
.and_then(InteractionType::deserialize)
.map_err(DeError::custom)?;
let data = map
.remove("data")
.ok_or_else(|| DeError::custom("expected data"))
.and_then(ApplicationCommandInteractionData::deserialize)
.map_err(DeError::custom)?;
let guild_id = match map.contains_key("guild_id") {
true => Some(
map.remove("guild_id")
.ok_or_else(|| DeError::custom("expected guild_id"))
.and_then(GuildId::deserialize)
.map_err(DeError::custom)?,
),
false => None,
};
let channel_id = map
.remove("channel_id")
.ok_or_else(|| DeError::custom("expected channel_id"))
.and_then(ChannelId::deserialize)
.map_err(DeError::custom)?;
let member = match map.contains_key("member") {
true => Some(
map.remove("member")
.ok_or_else(|| DeError::custom("expected member"))
.and_then(Member::deserialize)
.map_err(DeError::custom)?,
),
false => None,
};
let user = match map.contains_key("user") {
true => map
.remove("user")
.ok_or_else(|| DeError::custom("expected user"))
.and_then(User::deserialize)
.map_err(DeError::custom)?,
false => member.as_ref().expect("expected user or member").user.clone(),
};
let token = map
.remove("token")
.ok_or_else(|| DeError::custom("expected token"))
.and_then(String::deserialize)
.map_err(DeError::custom)?;
let version = map
.remove("version")
.ok_or_else(|| DeError::custom("expected version"))
.and_then(u8::deserialize)
.map_err(DeError::custom)?;
Ok(Self {
id,
application_id,
kind,
data,
guild_id,
channel_id,
member,
user,
token,
version,
})
}
}
/// The command data payload.
#[derive(Clone, Debug, Serialize)]
#[non_exhaustive]
pub struct ApplicationCommandInteractionData {
/// The Id of the invoked command.
pub id: CommandId,
/// The name of the invoked command.
pub name: String,
/// The application command type of the triggered application command.
#[serde(rename = "type")]
pub kind: ApplicationCommandType,
/// The parameters and the given values.
#[serde(default)]
pub options: Vec<ApplicationCommandInteractionDataOption>,
/// The converted objects from the given options.
#[serde(default)]
pub resolved: ApplicationCommandInteractionDataResolved,
/// The targeted user or message, if the triggered application command type
/// is [`User`] or [`Message`].
///
/// Its object data can be found in the [`resolved`] field.
///
/// [`resolved`]: Self::resolved
/// [`User`]: ApplicationCommandType::User
/// [`Message`]: ApplicationCommandType::Message
pub target_id: Option<TargetId>,
/// The target resolved data of [`target_id`]
///
/// [`target_id`]: Self::target_id
#[serde(skip_serializing)]
pub target: Option<ResolvedTarget>,
}
impl<'de> Deserialize<'de> for ApplicationCommandInteractionData {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> StdResult<Self, D::Error> {
let mut map = JsonMap::deserialize(deserializer)?;
let name = map
.remove("name")
.ok_or_else(|| DeError::custom("expected value"))
.and_then(String::deserialize)
.map_err(DeError::custom)?;
let id = map
.remove("id")
.ok_or_else(|| DeError::custom("expected value"))
.and_then(CommandId::deserialize)
.map_err(DeError::custom)?;
let resolved = match map.contains_key("resolved") {
true => map
.remove("resolved")
.ok_or_else(|| DeError::custom("expected resolved"))
.and_then(ApplicationCommandInteractionDataResolved::deserialize)
.map_err(DeError::custom)?,
false => ApplicationCommandInteractionDataResolved::default(),
};
let options = match map.contains_key("options") {
true => map
.remove("options")
.ok_or_else(|| DeError::custom("expected options"))
.and_then(|deserializer| deserialize_options_with_resolved(deserializer, &resolved))
.map_err(DeError::custom)?,
false => vec![],
};
let kind = map
.remove("type")
.ok_or_else(|| DeError::custom("expected type"))
.and_then(ApplicationCommandType::deserialize)
.map_err(DeError::custom)?;
let target_id = match kind != ApplicationCommandType::ChatInput {
true => Some(
map.remove("target_id")
.ok_or_else(|| DeError::custom("expected resolved"))
.and_then(TargetId::deserialize)
.map_err(DeError::custom)?,
),
false => None,
};
let target = match target_id {
Some(id) => {
if kind == ApplicationCommandType::Message {
let resolved = resolved
.messages
.get(&id.to_message_id())
.expect("expected message object")
.to_owned();
Some(ResolvedTarget::Message(resolved))
} else {
let user_id = id.to_user_id();
let user = resolved.users.get(&user_id).expect("expected user").to_owned();
let member = resolved.members.get(&user_id).map(|m| m.to_owned());
Some(ResolvedTarget::User(user, member))
}
},
None => None,
};
Ok(Self {
name,
id,
kind,
options,
resolved,
target_id,
target,
})
}
}
/// The resolved value of a [`ApplicationCommandInteractionData::target_id`].
#[derive(Clone, Debug, Serialize, Deserialize)]
#[non_exhaustive]
#[repr(u8)]
pub enum ResolvedTarget {
    /// Target of a user context-menu command; the member is present when
    /// resolved member data was supplied.
    User(User, Option<PartialMember>),
    /// Target of a message context-menu command.
    Message(Message),
}
/// The resolved data of a command data interaction payload.
/// It contains the objects of [`ApplicationCommandInteractionDataOption`]s.
/// Each map defaults to empty when the payload omits that key.
#[derive(Clone, Debug, Serialize, Default)]
#[non_exhaustive]
pub struct ApplicationCommandInteractionDataResolved {
    /// The resolved users.
    pub users: HashMap<UserId, User>,
    /// The resolved partial members.
    pub members: HashMap<UserId, PartialMember>,
    /// The resolved roles.
    pub roles: HashMap<RoleId, Role>,
    /// The resolved partial channels.
    pub channels: HashMap<ChannelId, PartialChannel>,
    /// The resolved messages.
    pub messages: HashMap<MessageId, Message>,
}
impl<'de> Deserialize<'de> for ApplicationCommandInteractionDataResolved {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> StdResult<Self, D::Error> {
let mut map = JsonMap::deserialize(deserializer)?;
let members = match map.contains_key("members") {
true => map
.remove("members")
.ok_or_else(|| DeError::custom("expected members"))
.and_then(deserialize_partial_members_map)
.map_err(DeError::custom)?,
false => HashMap::new(),
};
let users = match map.contains_key("users") {
true => map
.remove("users")
.ok_or_else(|| DeError::custom("expected users"))
.and_then(deserialize_users)
.map_err(DeError::custom)?,
false => HashMap::new(),
};
let roles = match map.contains_key("roles") {
true => map
.remove("roles")
.ok_or_else(|| DeError::custom("expected roles"))
.and_then(deserialize_roles_map)
.map_err(DeError::custom)?,
false => HashMap::new(),
};
let channels = match map.contains_key("channels") {
true => map
.remove("channels")
.ok_or_else(|| DeError::custom("expected channels"))
.and_then(deserialize_channels_map)
.map_err(DeError::custom)?,
false => HashMap::new(),
};
let messages = match map.contains_key("messages") {
true => map
.remove("messages")
.ok_or_else(|| DeError::custom("expected messages"))
.and_then(deserialize_messages_map)
.map_err(DeError::custom)?,
false => HashMap::new(),
};
Ok(Self {
users,
members,
roles,
channels,
messages,
})
}
}
/// A set of a parameter and a value from the user.
///
/// All options have names and an option can either be a parameter and input `value` or it can denote a sub-command or group, in which case it will contain a
/// top-level key and another vector of `options`.
///
/// Their resolved objects can be found on [`ApplicationCommandInteractionData::resolved`].
#[derive(Clone, Debug, Serialize)]
#[non_exhaustive]
pub struct ApplicationCommandInteractionDataOption {
/// The name of the parameter.
pub name: String,
/// The given value.
pub value: Option<Value>,
/// The value type.
#[serde(rename = "type")]
pub kind: ApplicationCommandOptionType,
/// The nested options.
///
/// **Note**: It is only present if the option is
/// a group or a subcommand.
#[serde(default)]
pub options: Vec<ApplicationCommandInteractionDataOption>,
/// The resolved object of the given `value`, if there is one.
#[serde(default)]
pub resolved: Option<ApplicationCommandInteractionDataOptionValue>,
}
impl<'de> Deserialize<'de> for ApplicationCommandInteractionDataOption {
    /// Manual deserialization: pulls known keys out of the raw JSON map so
    /// unknown keys from the API are silently tolerated. `resolved` is always
    /// initialized to `None` here.
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> StdResult<Self, D::Error> {
        let mut map = JsonMap::deserialize(deserializer)?;
        let name = map
            .remove("name")
            // Fixed copy-paste bug: the message previously said "expected value".
            .ok_or_else(|| DeError::custom("expected name"))
            .and_then(String::deserialize)
            .map_err(DeError::custom)?;
        let value = match map.contains_key("value") {
            true => Some(
                map.remove("value")
                    .ok_or_else(|| DeError::custom("expected value"))
                    .and_then(Value::deserialize)
                    .map_err(DeError::custom)?,
            ),
            false => None,
        };
        let kind = map
            .remove("type")
            .ok_or_else(|| DeError::custom("expected type"))
            .and_then(ApplicationCommandOptionType::deserialize)
            .map_err(DeError::custom)?;
        let options = match map.contains_key("options") {
            true => map
                .remove("options")
                // Fixed copy-paste bug: the message previously said "expected type".
                .ok_or_else(|| DeError::custom("expected options"))
                .and_then(deserialize_options)
                .map_err(DeError::custom)?,
            false => vec![],
        };
        Ok(Self {
            name,
            value,
            kind,
            options,
            resolved: None,
        })
    }
}
/// The resolved value of an [`ApplicationCommandInteractionDataOption`].
///
/// Variant names mirror the [`ApplicationCommandOptionType`] variants that
/// produce them.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[non_exhaustive]
#[repr(u8)]
pub enum ApplicationCommandInteractionDataOptionValue {
    String(String),
    Integer(i64),
    Boolean(bool),
    /// A user, together with the partial guild member data when available.
    User(User, Option<PartialMember>),
    Channel(PartialChannel),
    Role(Role),
    Number(f64),
}
/// Serde default for [`ApplicationCommand::default_permission`]: a command is
/// enabled by default when the field is absent from the payload.
fn default_permission_value() -> bool {
    true
}
/// The base command model that belongs to an application.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[non_exhaustive]
pub struct ApplicationCommand {
    /// The command Id.
    pub id: CommandId,
    /// The application command kind.
    #[serde(rename = "type")]
    pub kind: ApplicationCommandType,
    /// The parent application Id.
    pub application_id: ApplicationId,
    /// The command guild Id, if it is a guild command.
    ///
    /// **Note**: It may only be present if it is received through the gateway.
    pub guild_id: Option<GuildId>,
    /// The command name.
    pub name: String,
    /// The command description.
    pub description: String,
    /// The parameters for the command.
    #[serde(default)]
    pub options: Vec<ApplicationCommandOption>,
    /// Whether the command is enabled by default when
    /// the application is added to a guild.
    ///
    /// Defaults to `true` when the field is absent from the payload
    /// (see `default_permission_value`).
    #[serde(default = "self::default_permission_value")]
    pub default_permission: bool,
    /// An autoincremented version identifier updated during substantial record changes.
    pub version: CommandVersionId,
}
impl ApplicationCommand {
    /// Creates a global [`ApplicationCommand`],
    /// overriding an existing one with the same name if it exists.
    ///
    /// When a created [`ApplicationCommand`] is used, the [`InteractionCreate`] event will be emitted.
    ///
    /// **Note**: Global commands may take up to an hour to be updated in the user slash commands
    /// list. If an outdated command data is sent by a user, discord will consider it as an error
    /// and then will instantly update that command.
    ///
    /// As such, it is recommended that guild application commands be used for testing purposes.
    ///
    /// # Examples
    ///
    /// Create a simple ping command:
    ///
    /// ```rust,no_run
    /// # use serenity::http::Http;
    /// # use std::sync::Arc;
    /// #
    /// # async fn run() {
    /// # let http = Arc::new(Http::default());
    /// use serenity::model::{interactions::application_command::ApplicationCommand, id::ApplicationId};
    ///
    /// let _ = ApplicationCommand::create_global_application_command(&http, |command| {
    ///     command.name("ping")
    ///        .description("A simple ping command")
    /// })
    /// .await;
    /// # }
    /// ```
    ///
    /// Create a command that echoes what is inserted:
    ///
    /// ```rust,no_run
    /// # use serenity::http::Http;
    /// # use std::sync::Arc;
    /// #
    /// # async fn run() {
    /// # let http = Arc::new(Http::default());
    /// use serenity::model::{
    ///     interactions::application_command::{ApplicationCommand, ApplicationCommandOptionType},
    ///     id::ApplicationId
    /// };
    ///
    /// let _ = ApplicationCommand::create_global_application_command(&http, |command| {
    ///     command.name("echo")
    ///         .description("Makes the bot send a message")
    ///         .create_option(|option| {
    ///             option.name("message")
    ///                 .description("The message to send")
    ///                 .kind(ApplicationCommandOptionType::String)
    ///                 .required(true)
    ///         })
    /// })
    /// .await;
    /// # }
    /// ```
    ///
    /// # Errors
    ///
    /// May return an [`Error::Http`] if the [`ApplicationCommand`] is ill-formed,
    /// such as if more than 10 [`choices`] are set. See the [API Docs] for further details.
    ///
    /// Can also return an [`Error::Json`] if there is an error in deserializing
    /// the response.
    ///
    /// [`ApplicationCommand`]: crate::model::interactions::application_command::ApplicationCommand
    /// [`InteractionCreate`]: crate::client::EventHandler::interaction_create
    /// [API Docs]: https://discord.com/developers/docs/interactions/slash-commands
    /// [`Error::Http`]: crate::error::Error::Http
    /// [`Error::Json`]: crate::error::Error::Json
    /// [`choices`]: crate::model::interactions::application_command::ApplicationCommandOption::choices
    pub async fn create_global_application_command<F>(
        http: impl AsRef<Http>,
        f: F,
    ) -> Result<ApplicationCommand>
    where
        F: FnOnce(&mut CreateApplicationCommand) -> &mut CreateApplicationCommand,
    {
        let map = ApplicationCommand::build_application_command(f);
        http.as_ref().create_global_application_command(&Value::Object(map)).await
    }
    /// Overrides all global application commands.
    ///
    /// # Errors
    ///
    /// Same error conditions as [`create_global_application_command`].
    ///
    /// [`create_global_application_command`]: Self::create_global_application_command
    pub async fn set_global_application_commands<F>(
        http: impl AsRef<Http>,
        f: F,
    ) -> Result<Vec<ApplicationCommand>>
    where
        F: FnOnce(&mut CreateApplicationCommands) -> &mut CreateApplicationCommands,
    {
        let mut array = CreateApplicationCommands::default();
        f(&mut array);
        http.as_ref().create_global_application_commands(&Value::Array(array.0)).await
    }
    /// Edits a global command by its Id.
    ///
    /// # Errors
    ///
    /// May return [`Error::Http`] or [`Error::Json`] — same conditions as
    /// [`Self::create_global_application_command`].
    pub async fn edit_global_application_command<F>(
        http: impl AsRef<Http>,
        command_id: CommandId,
        f: F,
    ) -> Result<ApplicationCommand>
    where
        F: FnOnce(&mut CreateApplicationCommand) -> &mut CreateApplicationCommand,
    {
        let map = ApplicationCommand::build_application_command(f);
        http.as_ref().edit_global_application_command(command_id.into(), &Value::Object(map)).await
    }
    /// Gets all global commands.
    pub async fn get_global_application_commands(
        http: impl AsRef<Http>,
    ) -> Result<Vec<ApplicationCommand>> {
        http.as_ref().get_global_application_commands().await
    }
    /// Gets a global command by its Id.
    pub async fn get_global_application_command(
        http: impl AsRef<Http>,
        command_id: CommandId,
    ) -> Result<ApplicationCommand> {
        http.as_ref().get_global_application_command(command_id.into()).await
    }
    /// Deletes a global command by its Id.
    pub async fn delete_global_application_command(
        http: impl AsRef<Http>,
        command_id: CommandId,
    ) -> Result<()> {
        http.as_ref().delete_global_application_command(command_id.into()).await
    }
    /// Runs the builder closure and converts the result into the JSON map sent
    /// as the request body by the methods above.
    #[inline]
    pub(crate) fn build_application_command<F>(f: F) -> Map<String, Value>
    where
        F: FnOnce(&mut CreateApplicationCommand) -> &mut CreateApplicationCommand,
    {
        let mut create_application_command = CreateApplicationCommand::default();
        f(&mut create_application_command);
        utils::hashmap_to_json_map(create_application_command.0)
    }
}
/// The type of an application command.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
#[non_exhaustive]
#[repr(u8)]
pub enum ApplicationCommandType {
    ChatInput = 1,
    User = 2,
    Message = 3,
    /// Fallback for discriminant values this library version does not know
    /// (`!0` is 255 under `repr(u8)`).
    Unknown = !0,
}
// Generates the numeric (de)serialization for the known variants; values
// outside this list presumably map to `Unknown` — see the macro definition.
enum_number!(ApplicationCommandType {
    ChatInput,
    User,
    Message
});
/// The parameters for an [`ApplicationCommand`].
#[derive(Clone, Debug, Deserialize, Serialize)]
#[non_exhaustive]
pub struct ApplicationCommandOption {
    /// The option type.
    #[serde(rename = "type")]
    pub kind: ApplicationCommandOptionType,
    /// The option name.
    pub name: String,
    /// The option description.
    pub description: String,
    /// Whether the parameter is optional or required.
    ///
    /// Defaults to `false` (optional) when absent from the payload.
    #[serde(default)]
    pub required: bool,
    /// Choices the user can pick from.
    ///
    /// **Note**: Only available for [`String`] and [`Integer`] [`ApplicationCommandOptionType`].
    ///
    /// [`String`]: ApplicationCommandOptionType::String
    /// [`Integer`]: ApplicationCommandOptionType::Integer
    #[serde(default)]
    pub choices: Vec<ApplicationCommandOptionChoice>,
    /// The nested options.
    ///
    /// **Note**: Only available for [`SubCommand`] or [`SubCommandGroup`].
    ///
    /// [`SubCommand`]: ApplicationCommandOptionType::SubCommand
    /// [`SubCommandGroup`]: ApplicationCommandOptionType::SubCommandGroup
    #[serde(default)]
    pub options: Vec<ApplicationCommandOption>,
    /// If the option is a [`Channel`], it will only be able to show these types.
    ///
    /// [`Channel`]: ApplicationCommandOptionType::Channel
    #[serde(default)]
    pub channel_types: Vec<ChannelType>,
}
/// An [`ApplicationCommand`] permission.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[non_exhaustive]
pub struct ApplicationCommandPermission {
    /// The id of the command.
    pub id: CommandId,
    /// The id of the application the command belongs to.
    pub application_id: ApplicationId,
    /// The id of the guild.
    pub guild_id: GuildId,
    /// The permissions for the command in the guild.
    pub permissions: Vec<ApplicationCommandPermissionData>,
}
/// The [`ApplicationCommandPermission`] data.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[non_exhaustive]
pub struct ApplicationCommandPermissionData {
    /// The [`RoleId`] or [`UserId`], depends on `kind` value.
    ///
    /// Use [`CommandPermissionId::to_role_id`] / [`CommandPermissionId::to_user_id`]
    /// to interpret it according to `kind`.
    ///
    /// [`RoleId`]: crate::model::id::RoleId
    /// [`UserId`]: crate::model::id::UserId
    pub id: CommandPermissionId,
    /// The type of data this permissions applies to.
    #[serde(rename = "type")]
    pub kind: ApplicationCommandPermissionType,
    /// Whether or not the provided data can use the command or not.
    pub permission: bool,
}
impl CommandPermissionId {
/// Converts this [`CommandPermissionId`] to [`UserId`].
pub fn to_user_id(self) -> UserId {
self.0.into()
}
/// Converts this [`CommandPermissionId`] to [`RoleId`].
pub fn to_role_id(self) -> RoleId {
self.0.into()
}
}
// Infallible conversions between `CommandPermissionId` and the two id kinds it
// can stand for (role or user, disambiguated by the permission's `kind`); each
// simply copies the inner `u64`.
impl From<RoleId> for CommandPermissionId {
    fn from(id: RoleId) -> Self {
        Self(id.0)
    }
}
impl<'a> From<&'a RoleId> for CommandPermissionId {
    fn from(id: &RoleId) -> Self {
        Self(id.0)
    }
}
impl From<UserId> for CommandPermissionId {
    fn from(id: UserId) -> Self {
        Self(id.0)
    }
}
impl<'a> From<&'a UserId> for CommandPermissionId {
    fn from(id: &UserId) -> Self {
        Self(id.0)
    }
}
impl From<CommandPermissionId> for RoleId {
    fn from(id: CommandPermissionId) -> Self {
        Self(id.0)
    }
}
impl From<CommandPermissionId> for UserId {
    fn from(id: CommandPermissionId) -> Self {
        Self(id.0)
    }
}
impl TargetId {
    /// Converts this [`TargetId`] to [`UserId`].
    pub fn to_user_id(self) -> UserId {
        self.0.into()
    }
    /// Converts this [`TargetId`] to [`MessageId`].
    pub fn to_message_id(self) -> MessageId {
        self.0.into()
    }
}
// Infallible conversions between `TargetId` and the id kinds it can stand for
// (message or user); each simply copies the inner `u64`.
impl From<MessageId> for TargetId {
    fn from(id: MessageId) -> Self {
        Self(id.0)
    }
}
impl<'a> From<&'a MessageId> for TargetId {
    fn from(id: &MessageId) -> Self {
        Self(id.0)
    }
}
impl From<UserId> for TargetId {
    fn from(id: UserId) -> Self {
        Self(id.0)
    }
}
impl<'a> From<&'a UserId> for TargetId {
    fn from(id: &UserId) -> Self {
        Self(id.0)
    }
}
impl From<TargetId> for MessageId {
    fn from(id: TargetId) -> Self {
        Self(id.0)
    }
}
impl From<TargetId> for UserId {
    fn from(id: TargetId) -> Self {
        Self(id.0)
    }
}
/// The type of an [`ApplicationCommandOption`].
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
#[non_exhaustive]
#[repr(u8)]
pub enum ApplicationCommandOptionType {
    SubCommand = 1,
    SubCommandGroup = 2,
    String = 3,
    Integer = 4,
    Boolean = 5,
    User = 6,
    Channel = 7,
    Role = 8,
    Mentionable = 9,
    Number = 10,
    /// Fallback for discriminant values this library version does not know
    /// (`!0` is 255 under `repr(u8)`).
    Unknown = !0,
}
enum_number!(ApplicationCommandOptionType {
    SubCommand,
    SubCommandGroup,
    String,
    Integer,
    Boolean,
    User,
    Channel,
    Role,
    Mentionable,
    Number
});
/// The type of an [`ApplicationCommandPermissionData`].
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
#[non_exhaustive]
#[repr(u8)]
pub enum ApplicationCommandPermissionType {
    Role = 1,
    User = 2,
    /// Fallback for discriminant values this library version does not know
    /// (`!0` is 255 under `repr(u8)`).
    Unknown = !0,
}
enum_number!(ApplicationCommandPermissionType {
    Role,
    User
});
/// The only valid values a user can pick in an [`ApplicationCommandOption`].
#[derive(Clone, Debug, Deserialize, Serialize)]
#[non_exhaustive]
pub struct ApplicationCommandOptionChoice {
    /// The choice name.
    pub name: String,
    /// The choice value.
    ///
    /// Stored as a raw JSON [`Value`]; its concrete type presumably matches
    /// the owning option's [`ApplicationCommandOptionType`] — confirm before
    /// relying on it.
    pub value: Value,
}
| 32.970883 | 157 | 0.59288 |
de39823aa6e099345d583049af905882d7dc8741 | 2,823 | use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::thread;
use std::time::{Duration, Instant};
use lazy_static::lazy_static;
/// Seconds since the unix epoch as a float, truncated to millisecond precision.
///
/// OpenMetrics require unix epoch timestamps:
/// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#timestamps-2
pub fn epoch_timestamp() -> f64 {
    use std::time::SystemTime;
    // A clock before the epoch yields a zero duration instead of an error.
    let since_epoch = SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)
        .unwrap_or_default();
    // Keep only whole milliseconds of the sub-second part.
    let milli_frac = f64::from(since_epoch.subsec_millis()) / 1e3;
    since_epoch.as_secs() as f64 + milli_frac
}
/// Milliseconds since ANCHOR.
// Monotonically non-decreasing cache of the latest `now_millis` result;
// read cheaply via `recent_millis`.
static RECENT: AtomicU64 = AtomicU64::new(0);
lazy_static! {
    // Fixed reference instant, initialized lazily on first dereference.
    static ref ANCHOR: Instant = Instant::now();
}
/// Converts a duration to whole milliseconds, truncating any sub-millisecond part.
#[inline]
pub fn duration_to_millis(dur: Duration) -> u64 {
    let from_secs = dur.as_secs() * 1000;
    from_secs + u64::from(dur.subsec_millis())
}
/// Returns milliseconds since ANCHOR.
///
/// ANCHOR is some fixed point in history.
pub fn now_millis() -> u64 {
    let res = Instant::now();
    // `saturating_duration_since` guards against ANCHOR being observed after `res`.
    let t = duration_to_millis(res.saturating_duration_since(*ANCHOR));
    let mut recent = RECENT.load(Ordering::Relaxed);
    // Publish `t` into RECENT without ever letting RECENT go backwards: if a
    // concurrent caller already stored a larger value, return that instead.
    loop {
        if recent > t {
            return recent;
        }
        match RECENT.compare_exchange_weak(recent, t, Ordering::Relaxed, Ordering::Relaxed) {
            Ok(_) => return t,
            // Spurious failure or lost race — retry with the freshly observed value.
            Err(r) => recent = r,
        }
    }
}
/// Returns recent returned value by `now_millis`.
///
/// Cheap (one relaxed atomic load); 0 until `now_millis` has been called.
pub fn recent_millis() -> u64 {
    RECENT.load(Ordering::Relaxed)
}
lazy_static! {
    // Set to true exactly once by `ensure_updater` when the background thread
    // is spawned.
    static ref UPDATER_IS_RUNNING: AtomicBool = AtomicBool::new(false);
}
// Tick period of the background updater thread.
const CHECK_UPDATE_INTERVAL: Duration = Duration::from_millis(200);
/// Ensures background updater is running, which will call `now_millis` periodically.
pub fn ensure_updater() {
    // `swap` returns the previous value, so only the first caller observes
    // `false` and spawns the updater thread; later calls are no-ops.
    let was_running = UPDATER_IS_RUNNING.swap(true, Ordering::SeqCst);
    if !was_running {
        std::thread::Builder::new()
            .name("time updater".to_owned())
            .spawn(|| loop {
                thread::sleep(CHECK_UPDATE_INTERVAL);
                // Refreshes RECENT as a side effect.
                now_millis();
            })
            .unwrap();
    }
}
#[cfg(test)]
mod tests {
    use std::thread;
    use std::time::Duration;
    #[test]
    fn test_duration_to_millis() {
        // (seconds, extra nanoseconds, expected milliseconds)
        let cases = vec![(1, 1, 1000), (0, 1_000_000, 1), (3, 103_000_000, 3103)];
        for (secs, nanos, exp) in cases {
            let dur = Duration::new(secs, nanos);
            assert_eq!(super::duration_to_millis(dur), exp);
        }
    }
    #[test]
    fn test_time_update() {
        // NOTE(review): assumes no other test calls `now_millis` first, since
        // RECENT starts at 0 — re-check if more tests are added to this module.
        assert_eq!(super::recent_millis(), 0);
        let now = super::now_millis();
        assert_eq!(super::recent_millis(), now);
        super::ensure_updater();
        // Sleep long enough for the background updater to tick at least once.
        thread::sleep(super::CHECK_UPDATE_INTERVAL * 2);
        assert!(super::recent_millis() > now);
    }
}
| 28.806122 | 103 | 0.623096 |
edb33fe6e2b13a06dc1f822fa8fadd68f1496691 | 1,092 | use crate::spec::{LinkerFlavor, Target, TargetOptions};
/// Builds the `x86_64-apple-darwin` target specification.
pub fn target() -> Target {
    let mut base = super::apple_base::opts("macos");
    base.cpu = "core2".to_string();
    base.max_atomic_width = Some(128); // core2 support cmpxchg16b
    base.eliminate_frame_pointer = false;
    // Force 64-bit codegen and the x86_64 slice when linking via the C compiler driver.
    base.pre_link_args.insert(
        LinkerFlavor::Gcc,
        vec!["-m64".to_string(), "-arch".to_string(), "x86_64".to_string()],
    );
    base.link_env_remove.extend(super::apple_base::macos_link_env_remove());
    base.stack_probes = true;
    // Clang automatically chooses a more specific target based on
    // MACOSX_DEPLOYMENT_TARGET. To enable cross-language LTO to work
    // correctly, we do too.
    let arch = "x86_64";
    let llvm_target = super::apple_base::macos_llvm_target(&arch);
    Target {
        llvm_target,
        pointer_width: 64,
        data_layout: "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
            .to_string(),
        arch: arch.to_string(),
        // NOTE(review): the `\u{1}` prefix appears to mark the mcount symbol as
        // a literal (unmangled) name — confirm against rustc's symbol handling.
        options: TargetOptions { mcount: "\u{1}mcount".to_string(), ..base },
    }
}
| 36.4 | 93 | 0.648352 |
ff6e3babfea761ece467bf61dbb2a026d4f760a1 | 1,497 | use std::env;
use ipnetwork::Ipv4Network;
use rtnetlink::{new_connection, Error, Handle};
/// Entry point: parses `<destination>/<prefix_length>` and `<gateway>` from
/// argv and adds the corresponding IPv4 route via netlink.
#[tokio::main]
async fn main() -> Result<(), ()> {
    let args: Vec<String> = env::args().collect();
    if args.len() != 3 {
        usage();
        return Ok(());
    }
    // Exit with a non-zero status on unparsable input instead of panicking.
    let dest: Ipv4Network = args[1].parse().unwrap_or_else(|_| {
        eprintln!("invalid destination");
        std::process::exit(1);
    });
    let gateway: Ipv4Network = args[2].parse().unwrap_or_else(|_| {
        eprintln!("invalid gateway");
        std::process::exit(1);
    });
    // Drive the netlink connection on a background task; `handle` issues requests to it.
    let (connection, handle, _) = new_connection().unwrap();
    tokio::spawn(connection);
    // NOTE(review): a failed add is only printed; the process still exits 0.
    if let Err(e) = add_route(&dest, &gateway, handle.clone()).await {
        eprintln!("{}", e);
    }
    Ok(())
}
/// Adds an IPv4 route to `dest` via `gateway` using the given rtnetlink handle.
async fn add_route(dest: &Ipv4Network, gateway: &Ipv4Network, handle: Handle) -> Result<(), Error> {
    let route = handle.route();
    route
        .add_v4()
        .destination_prefix(dest.ip(), dest.prefix())
        .gateway(gateway.ip())
        .execute()
        .await?;
    Ok(())
}
/// Prints usage instructions to stderr.
fn usage() {
    eprintln!(
        "usage:
    cargo run --example add_route -- <destination>/<prefix_length> <gateway>
Note that you need to run this program as root. Instead of running cargo as root,
build the example normally:
    cd rtnetlink ; cargo build --example add_route
Then find the binary in the target directory:
    cd ../target/debug/example ; sudo ./add_route <destination>/<prefix_length> <gateway>"
    );
}
| 25.810345 | 100 | 0.599198 |
5668e1c467fdad2062a670743bdade82da62080c | 249 | //!
//! All expression group definitions.
//!
mod action_group;
mod combinator;
mod expr_group;
mod group_determiner;
mod types;
pub use action_group::*;
pub use combinator::*;
pub use expr_group::*;
pub use group_determiner::*;
pub use types::*;
| 15.5625 | 37 | 0.722892 |
50939a966041932cc063f729d66e1443fbc6a7d7 | 10,165 | /* automatically generated by rust-bindgen 0.55.1 */
#![allow(non_camel_case_types, non_snake_case)]
pub const SPIRO_CORNER: u8 = 118u8;
pub const SPIRO_G4: u8 = 111u8;
pub const SPIRO_G2: u8 = 99u8;
pub const SPIRO_LEFT: u8 = 91u8;
pub const SPIRO_RIGHT: u8 = 93u8;
pub const SPIRO_END: u8 = 122u8;
pub const SPIRO_OPEN_CONTOUR: u8 = 123u8;
pub const SPIRO_END_OPEN_CONTOUR: u8 = 125u8;
pub const SPIRO_ANCHOR: u8 = 97u8;
pub const SPIRO_HANDLE: u8 = 104u8;
pub const SPIRO_INCLUDE_LAST_KNOT: u32 = 256;
pub const SPIRO_RETRO_VER1: u32 = 1024;
pub const SPIRO_REVERSE_SRC: u32 = 2048;
pub const SPIRO_ARC_CUB_QUAD_CLR: u32 = 32767;
pub const SPIRO_ARC_CUB_QUAD_MASK: u32 = 28672;
pub const SPIRO_CUBIC_TO_BEZIER: u32 = 0;
pub const SPIRO_CUBIC_MIN_MAYBE: u32 = 4096;
pub const SPIRO_ARC_MAYBE: u32 = 8192;
pub const SPIRO_ARC_MIN_MAYBE: u32 = 12288;
pub const SPIRO_QUAD0_TO_BEZIER: u32 = 16384;
pub type bezctx = _bezctx;
// Free-function counterparts of the `_bezctx` callback fields below; each takes
// the context pointer as its first argument, C-style.
extern "C" {
    pub fn bezctx_moveto(bc: *mut bezctx, x: f64, y: f64, is_open: ::std::os::raw::c_int);
}
extern "C" {
    pub fn bezctx_lineto(bc: *mut bezctx, x: f64, y: f64);
}
extern "C" {
    pub fn bezctx_quadto(bc: *mut bezctx, x1: f64, y1: f64, x2: f64, y2: f64);
}
extern "C" {
    pub fn bezctx_curveto(bc: *mut bezctx, x1: f64, y1: f64, x2: f64, y2: f64, x3: f64, y3: f64);
}
extern "C" {
    pub fn bezctx_mark_knot(bc: *mut bezctx, knot_idx: ::std::os::raw::c_int);
}
// Output sink for the generated bezier path: a C-style table of optional
// callbacks invoked for each emitted path element.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct _bezctx {
    pub moveto: ::std::option::Option<
        unsafe extern "C" fn(bc: *mut bezctx, x: f64, y: f64, is_open: ::std::os::raw::c_int),
    >,
    pub lineto: ::std::option::Option<unsafe extern "C" fn(bc: *mut bezctx, x: f64, y: f64)>,
    pub quadto: ::std::option::Option<
        unsafe extern "C" fn(bc: *mut bezctx, x1: f64, y1: f64, x2: f64, y2: f64),
    >,
    pub curveto: ::std::option::Option<
        unsafe extern "C" fn(bc: *mut bezctx, x1: f64, y1: f64, x2: f64, y2: f64, x3: f64, y3: f64),
    >,
    pub mark_knot: ::std::option::Option<
        unsafe extern "C" fn(bc: *mut bezctx, knot_idx: ::std::os::raw::c_int),
    >,
}
#[test]
fn bindgen_test_layout__bezctx() {
    // Auto-generated by bindgen: asserts that the Rust struct layout matches
    // the C ABI (total size, alignment and per-field offsets).
    assert_eq!(
        ::std::mem::size_of::<_bezctx>(),
        40usize,
        concat!("Size of: ", stringify!(_bezctx))
    );
    assert_eq!(
        ::std::mem::align_of::<_bezctx>(),
        8usize,
        concat!("Alignment of ", stringify!(_bezctx))
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<_bezctx>())).moveto as *const _ as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(_bezctx),
            "::",
            stringify!(moveto)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<_bezctx>())).lineto as *const _ as usize },
        8usize,
        concat!(
            "Offset of field: ",
            stringify!(_bezctx),
            "::",
            stringify!(lineto)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<_bezctx>())).quadto as *const _ as usize },
        16usize,
        concat!(
            "Offset of field: ",
            stringify!(_bezctx),
            "::",
            stringify!(quadto)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<_bezctx>())).curveto as *const _ as usize },
        24usize,
        concat!(
            "Offset of field: ",
            stringify!(_bezctx),
            "::",
            stringify!(curveto)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<_bezctx>())).mark_knot as *const _ as usize },
        32usize,
        concat!(
            "Offset of field: ",
            stringify!(_bezctx),
            "::",
            stringify!(mark_knot)
        )
    );
}
// A single spiro control point.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct spiro_cp {
    pub x: f64,
    pub y: f64,
    // Presumably one of the SPIRO_* tag bytes declared above — confirm
    // against libspiro's documentation.
    pub ty: ::std::os::raw::c_char,
}
#[test]
fn bindgen_test_layout_spiro_cp() {
    // Auto-generated by bindgen: asserts that the Rust struct layout matches
    // the C ABI (total size, alignment and per-field offsets).
    assert_eq!(
        ::std::mem::size_of::<spiro_cp>(),
        24usize,
        concat!("Size of: ", stringify!(spiro_cp))
    );
    assert_eq!(
        ::std::mem::align_of::<spiro_cp>(),
        8usize,
        concat!("Alignment of ", stringify!(spiro_cp))
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<spiro_cp>())).x as *const _ as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(spiro_cp),
            "::",
            stringify!(x)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<spiro_cp>())).y as *const _ as usize },
        8usize,
        concat!(
            "Offset of field: ",
            stringify!(spiro_cp),
            "::",
            stringify!(y)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<spiro_cp>())).ty as *const _ as usize },
        16usize,
        concat!(
            "Offset of field: ",
            stringify!(spiro_cp),
            "::",
            stringify!(ty)
        )
    );
}
// A solved spiro segment as produced by `run_spiro`/`run_spiro0`.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct spiro_seg_s {
    pub x: f64,
    pub y: f64,
    pub ty: ::std::os::raw::c_char,
    pub bend_th: f64,
    pub ks: [f64; 4usize],
    pub seg_ch: f64,
    pub seg_th: f64,
    pub l: f64,
}
#[test]
fn bindgen_test_layout_spiro_seg_s() {
    // Auto-generated by bindgen: asserts that the Rust struct layout matches
    // the C ABI (total size, alignment and per-field offsets).
    assert_eq!(
        ::std::mem::size_of::<spiro_seg_s>(),
        88usize,
        concat!("Size of: ", stringify!(spiro_seg_s))
    );
    assert_eq!(
        ::std::mem::align_of::<spiro_seg_s>(),
        8usize,
        concat!("Alignment of ", stringify!(spiro_seg_s))
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<spiro_seg_s>())).x as *const _ as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(spiro_seg_s),
            "::",
            stringify!(x)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<spiro_seg_s>())).y as *const _ as usize },
        8usize,
        concat!(
            "Offset of field: ",
            stringify!(spiro_seg_s),
            "::",
            stringify!(y)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<spiro_seg_s>())).ty as *const _ as usize },
        16usize,
        concat!(
            "Offset of field: ",
            stringify!(spiro_seg_s),
            "::",
            stringify!(ty)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<spiro_seg_s>())).bend_th as *const _ as usize },
        24usize,
        concat!(
            "Offset of field: ",
            stringify!(spiro_seg_s),
            "::",
            stringify!(bend_th)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<spiro_seg_s>())).ks as *const _ as usize },
        32usize,
        concat!(
            "Offset of field: ",
            stringify!(spiro_seg_s),
            "::",
            stringify!(ks)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<spiro_seg_s>())).seg_ch as *const _ as usize },
        64usize,
        concat!(
            "Offset of field: ",
            stringify!(spiro_seg_s),
            "::",
            stringify!(seg_ch)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<spiro_seg_s>())).seg_th as *const _ as usize },
        72usize,
        concat!(
            "Offset of field: ",
            stringify!(spiro_seg_s),
            "::",
            stringify!(seg_th)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<spiro_seg_s>())).l as *const _ as usize },
        80usize,
        concat!(
            "Offset of field: ",
            stringify!(spiro_seg_s),
            "::",
            stringify!(l)
        )
    );
}
// Alias matching the C `typedef` for the segment struct.
pub type spiro_seg = spiro_seg_s;
extern "C" {
    pub fn run_spiro(src: *const spiro_cp, n: ::std::os::raw::c_int) -> *mut spiro_seg;
}
// Releases a segment array returned by `run_spiro`/`run_spiro0`.
extern "C" {
    pub fn free_spiro(s: *mut spiro_seg);
}
extern "C" {
    pub fn spiro_to_bpath(s: *const spiro_seg, n: ::std::os::raw::c_int, bc: *mut bezctx);
}
extern "C" {
    pub fn get_knot_th(s: *const spiro_seg, i: ::std::os::raw::c_int) -> f64;
}
// `*0` variants take extra `dm`/`ncq` arguments (presumably the SPIRO_* flag
// values declared above — confirm against libspiro's headers).
extern "C" {
    pub fn run_spiro0(
        src: *const spiro_cp,
        dm: *mut f64,
        ncq: ::std::os::raw::c_int,
        n: ::std::os::raw::c_int,
    ) -> *mut spiro_seg;
}
extern "C" {
    pub fn spiro_to_bpath0(
        src: *const spiro_cp,
        s: *const spiro_seg,
        dm: *mut f64,
        ncq: ::std::os::raw::c_int,
        n: ::std::os::raw::c_int,
        bc: *mut bezctx,
    );
}
extern "C" {
    pub fn spiroreverse(src: *mut spiro_cp, n: ::std::os::raw::c_int) -> ::std::os::raw::c_int;
}
extern "C" {
    pub fn LibSpiroVersion() -> *const ::std::os::raw::c_char;
}
// Convenience entry points converting control points straight to bezier output.
extern "C" {
    pub fn TaggedSpiroCPsToBezier(spiros: *mut spiro_cp, bc: *mut bezctx);
}
extern "C" {
    pub fn SpiroCPsToBezier(
        spiros: *mut spiro_cp,
        n: ::std::os::raw::c_int,
        isclosed: ::std::os::raw::c_int,
        bc: *mut bezctx,
    );
}
extern "C" {
    pub fn TaggedSpiroCPsToBezier0(spiros: *mut spiro_cp, bc: *mut bezctx)
        -> ::std::os::raw::c_int;
}
extern "C" {
    pub fn SpiroCPsToBezier0(
        spiros: *mut spiro_cp,
        n: ::std::os::raw::c_int,
        isclosed: ::std::os::raw::c_int,
        bc: *mut bezctx,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    pub fn TaggedSpiroCPsToBezier1(
        spiros: *mut spiro_cp,
        bc: *mut bezctx,
        done: *mut ::std::os::raw::c_int,
    );
}
extern "C" {
    pub fn SpiroCPsToBezier1(
        spiros: *mut spiro_cp,
        n: ::std::os::raw::c_int,
        isclosed: ::std::os::raw::c_int,
        bc: *mut bezctx,
        done: *mut ::std::os::raw::c_int,
    );
}
extern "C" {
    pub fn TaggedSpiroCPsToBezier2(
        spiros: *mut spiro_cp,
        ncq: ::std::os::raw::c_int,
        bc: *mut bezctx,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    pub fn SpiroCPsToBezier2(
        spiros: *mut spiro_cp,
        n: ::std::os::raw::c_int,
        ncq: ::std::os::raw::c_int,
        isclosed: ::std::os::raw::c_int,
        bc: *mut bezctx,
    ) -> ::std::os::raw::c_int;
}
| 27.622283 | 100 | 0.515789 |
4a14ef6d5fc6cf715b14db73eb93bb4ae8f7dc69 | 626 | // Copyright 2019 Steven Bosnick
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE-2.0 or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms
use sel4_build::{CMakeTarget, Platform};
/// Build-script entry point: compiles the seL4 kernel for the Hikey board and
/// generates the matching Rust bindings.
fn main() {
    let target = CMakeTarget::Kernel(Platform::Hikey);
    // Build the kernel for the Hikey platform.
    target.build();
    // Generate the Rust bindings to the platform-specific parts of the seL4 library.
    target.bindgen();
}
71b657cc40a940b95a24703d280103319bbebcac | 64,697 | #![allow(unused_parens)]
//! # Object Detection
//!
//! Haar Feature-based Cascade Classifier for Object Detection
//! ----------------------------------------------------------
//!
//! The object detector described below has been initially proposed by Paul Viola [Viola01](https://docs.opencv.org/3.2.0/d0/de3/citelist.html#CITEREF_Viola01) and
//! improved by Rainer Lienhart [Lienhart02](https://docs.opencv.org/3.2.0/d0/de3/citelist.html#CITEREF_Lienhart02) .
//!
//! First, a classifier (namely a *cascade of boosted classifiers working with haar-like features*) is
//! trained with a few hundred sample views of a particular object (i.e., a face or a car), called
//! positive examples, that are scaled to the same size (say, 20x20), and negative examples - arbitrary
//! images of the same size.
//!
//! After a classifier is trained, it can be applied to a region of interest (of the same size as used
//! during the training) in an input image. The classifier outputs a "1" if the region is likely to show
//! the object (i.e., face/car), and "0" otherwise. To search for the object in the whole image one can
//! move the search window across the image and check every location using the classifier. The
//! classifier is designed so that it can be easily "resized" in order to be able to find the objects of
//! interest at different sizes, which is more efficient than resizing the image itself. So, to find an
//! object of an unknown size in the image the scan procedure should be done several times at different
//! scales.
//!
//! The word "cascade" in the classifier name means that the resultant classifier consists of several
//! simpler classifiers (*stages*) that are applied subsequently to a region of interest until at some
//! stage the candidate is rejected or all the stages are passed. The word "boosted" means that the
//! classifiers at every stage of the cascade are complex themselves and they are built out of basic
//! classifiers using one of four different boosting techniques (weighted voting). Currently Discrete
//! Adaboost, Real Adaboost, Gentle Adaboost and Logitboost are supported. The basic classifiers are
//! decision-tree classifiers with at least 2 leaves. Haar-like features are the input to the basic
//! classifiers, and are calculated as described below. The current algorithm uses the following
//! Haar-like features:
//!
//! 
//!
//! The feature used in a particular classifier is specified by its shape (1a, 2b etc.), position within
//! the region of interest and the scale (this scale is not the same as the scale used at the detection
//! stage, though these two scales are multiplied). For example, in the case of the third line feature
//! (2c) the response is calculated as the difference between the sum of image pixels under the
//! rectangle covering the whole feature (including the two white stripes and the black stripe in the
//! middle) and the sum of the image pixels under the black stripe multiplied by 3 in order to
//! compensate for the differences in the size of areas. The sums of pixel values over a rectangular
//! regions are calculated rapidly using integral images (see below and the integral description).
//!
//! To see the object detector at work, have a look at the facedetect demo:
//! <https://github.com/opencv/opencv/tree/master/samples/cpp/dbt_face_detection.cpp>
//!
//! The following reference is for the detection part only. There is a separate application called
//! opencv_traincascade that can train a cascade of boosted classifiers from a set of samples.
//!
//!
//! Note: In the new C++ interface it is also possible to use LBP (local binary pattern) features in
//! addition to Haar-like features. .. [Viola01] Paul Viola and Michael J. Jones. Rapid Object Detection
//! using a Boosted Cascade of Simple Features. IEEE CVPR, 2001. The paper is available online at
//! <http://research.microsoft.com/en-us/um/people/viola/Pubs/Detect/violaJones_CVPR2001.pdf>
//! # C API
use crate::{mod_prelude::*, core, sys, types};
pub mod prelude {
pub use { super::SimilarRectsTrait, super::BaseCascadeClassifier_MaskGenerator, super::BaseCascadeClassifier, super::CascadeClassifierTrait, super::DetectionROITrait, super::HOGDescriptorTrait, super::DetectionBasedTracker_ParametersTrait, super::DetectionBasedTracker_IDetector, super::DetectionBasedTracker_ExtObjectTrait, super::DetectionBasedTrackerTrait };
}
// Cascade-classifier detection flags (bit flags mirroring OpenCV's CASCADE_*).
pub const CASCADE_DO_CANNY_PRUNING: i32 = 1;
pub const CASCADE_DO_ROUGH_SEARCH: i32 = 8;
pub const CASCADE_FIND_BIGGEST_OBJECT: i32 = 4;
pub const CASCADE_SCALE_IMAGE: i32 = 2;
// Legacy C-API aliases carrying the same values as the CASCADE_* flags above.
pub const CV_HAAR_DO_CANNY_PRUNING: i32 = 1;
pub const CV_HAAR_DO_ROUGH_SEARCH: i32 = 8;
pub const CV_HAAR_FEATURE_MAX: i32 = 3;
pub const CV_HAAR_FIND_BIGGEST_OBJECT: i32 = 4;
pub const CV_HAAR_MAGIC_VAL: i32 = 0x42500000;
pub const CV_HAAR_SCALE_IMAGE: i32 = 2;
pub const CV_TYPE_NAME_HAAR: &'static str = "opencv-haar-classifier";
pub const HOGDescriptor_DEFAULT_NLEVELS: i32 = 64;
pub const HOGDescriptor_L2Hys: i32 = 0;
/// Per-object tracking status used by `DetectionBasedTracker`.
#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum DetectionBasedTracker_ObjectStatus {
	DETECTED_NOT_SHOWN_YET = 0,
	DETECTED = 1,
	DETECTED_TEMPORARY_LOST = 2,
	WRONG_OBJECT = 3,
}
opencv_type_enum! { crate::objdetect::DetectionBasedTracker_ObjectStatus }
/// Creates the mask generator used with [`BaseCascadeClassifier`] for face detection.
///
/// NOTE(review): thin FFI wrapper over `cv_createFaceDetectionMaskGenerator`;
/// the returned `Ptr` takes ownership of the native object.
pub fn create_face_detection_mask_generator() -> Result<core::Ptr::<dyn crate::objdetect::BaseCascadeClassifier_MaskGenerator>> {
	unsafe { sys::cv_createFaceDetectionMaskGenerator() }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::objdetect::BaseCascadeClassifier_MaskGenerator>::opencv_from_extern(r) } )
}
/// Mean-shift grouping of candidate rectangles; all three vectors are modified
/// in place (they are passed to the C++ side by mutable reference).
///
/// ## C++ default parameters
/// * detect_threshold: 0.0
/// * win_det_size: Size(64,128)
pub fn group_rectangles_meanshift(rect_list: &mut core::Vector::<core::Rect>, found_weights: &mut core::Vector::<f64>, found_scales: &mut core::Vector::<f64>, detect_threshold: f64, win_det_size: core::Size) -> Result<()> {
	unsafe { sys::cv_groupRectangles_meanshift_vector_Rect_R_vector_double_R_vector_double_R_double_Size(rect_list.as_raw_mut_VectorOfRect(), found_weights.as_raw_mut_VectorOff64(), found_scales.as_raw_mut_VectorOff64(), detect_threshold, win_det_size.opencv_as_extern()) }.into_result()
}
/// Groups the object candidate rectangles.
///
/// Clusters the input rectangles by similarity of size and location (controlled
/// by `eps`, via the generic `partition` machinery), rejects clusters containing
/// `group_threshold` or fewer rectangles, and replaces each surviving cluster
/// with its average rectangle. With `eps == 0` no clustering happens; with a
/// large `eps` everything collapses into one cluster.
///
/// ## Parameters
/// * rect_list: in/out vector; on return holds the retained, grouped rectangles
///   (which may lie partially outside the original image).
/// * group_threshold: minimum possible number of rectangles in a cluster, minus 1.
/// * eps: relative difference between rectangle sides below which two rectangles merge.
///
/// ## C++ default parameters
/// * eps: 0.2
pub fn group_rectangles(rect_list: &mut core::Vector::<core::Rect>, group_threshold: i32, eps: f64) -> Result<()> {
	let rects = rect_list.as_raw_mut_VectorOfRect();
	unsafe { sys::cv_groupRectangles_vector_Rect_R_int_double(rects, group_threshold, eps) }.into_result()
}
/// Groups the object candidate rectangles (overload reporting weights and level weights).
///
/// Behaves like [`group_rectangles`]; additionally fills `weights` and
/// `level_weights` for the retained rectangles.
///
/// ## Parameters
/// * rect_list: in/out vector; on return holds the retained, grouped rectangles.
/// * group_threshold: minimum possible number of rectangles in a cluster, minus 1.
/// * eps: relative difference between rectangle sides below which two rectangles merge.
/// * weights: per-rectangle weights (out).
/// * level_weights: per-rectangle level weights (out).
pub fn group_rectangles_levelweights(rect_list: &mut core::Vector::<core::Rect>, group_threshold: i32, eps: f64, weights: &mut core::Vector::<i32>, level_weights: &mut core::Vector::<f64>) -> Result<()> {
	let rects = rect_list.as_raw_mut_VectorOfRect();
	let w = weights.as_raw_mut_VectorOfi32();
	let lw = level_weights.as_raw_mut_VectorOff64();
	unsafe { sys::cv_groupRectangles_vector_Rect_R_int_double_vector_int_X_vector_double_X(rects, group_threshold, eps, w, lw) }.into_result()
}
/// Groups the object candidate rectangles (overload taking a weights vector).
///
/// Behaves like [`group_rectangles`]; `weights` is updated alongside `rect_list`.
///
/// ## Parameters
/// * rect_list: in/out vector; on return holds the retained, grouped rectangles.
/// * weights: in/out per-rectangle weights, kept in sync with `rect_list`.
/// * group_threshold: minimum possible number of rectangles in a cluster, minus 1.
/// * eps: relative difference between rectangle sides below which two rectangles merge.
///
/// ## C++ default parameters
/// * eps: 0.2
pub fn group_rectangles_weights(rect_list: &mut core::Vector::<core::Rect>, weights: &mut core::Vector::<i32>, group_threshold: i32, eps: f64) -> Result<()> {
	let rects = rect_list.as_raw_mut_VectorOfRect();
	let w = weights.as_raw_mut_VectorOfi32();
	unsafe { sys::cv_groupRectangles_vector_Rect_R_vector_int_R_int_double(rects, w, group_threshold, eps) }.into_result()
}
/// Groups the object candidate rectangles (overload taking reject levels and level weights).
///
/// Behaves like [`group_rectangles`]; `reject_levels` and `level_weights` are
/// kept in sync with the retained rectangles.
///
/// ## Parameters
/// * rect_list: in/out vector; on return holds the retained, grouped rectangles.
/// * reject_levels: in/out per-rectangle reject levels.
/// * level_weights: in/out per-rectangle level weights.
/// * group_threshold: minimum possible number of rectangles in a cluster, minus 1.
/// * eps: relative difference between rectangle sides below which two rectangles merge.
///
/// ## C++ default parameters
/// * eps: 0.2
pub fn group_rectangles_levels(rect_list: &mut core::Vector::<core::Rect>, reject_levels: &mut core::Vector::<i32>, level_weights: &mut core::Vector::<f64>, group_threshold: i32, eps: f64) -> Result<()> {
	let rects = rect_list.as_raw_mut_VectorOfRect();
	let levels = reject_levels.as_raw_mut_VectorOfi32();
	let lw = level_weights.as_raw_mut_VectorOff64();
	unsafe { sys::cv_groupRectangles_vector_Rect_R_vector_int_R_vector_double_R_int_double(rects, levels, lw, group_threshold, eps) }.into_result()
}
/// Abstract cascade-classifier interface (mirrors the C++ `cv::BaseCascadeClassifier`).
/// [`CascadeClassifierTrait`] exposes the same operations on the concrete wrapper.
pub trait BaseCascadeClassifier: core::AlgorithmTrait {
	fn as_raw_BaseCascadeClassifier(&self) -> *const c_void;
	fn as_raw_mut_BaseCascadeClassifier(&mut self) -> *mut c_void;
	/// Checks whether a classifier model has been loaded (`true` = empty).
	fn empty(&self) -> Result<bool> {
		unsafe { sys::cv_BaseCascadeClassifier_empty_const(self.as_raw_BaseCascadeClassifier()) }.into_result()
	}
	/// Loads a classifier model from `filename`; returns `true` on success.
	fn load(&mut self, filename: &str) -> Result<bool> {
		// Macro shadows `filename` with an FFI-compatible string container
		// that stays alive for the duration of the call below.
		extern_container_arg!(filename);
		unsafe { sys::cv_BaseCascadeClassifier_load_const_StringR(self.as_raw_mut_BaseCascadeClassifier(), filename.opencv_as_extern()) }.into_result()
	}
	/// Detects objects at multiple scales; same contract as
	/// [`CascadeClassifierTrait::detect_multi_scale`].
	fn detect_multi_scale(&mut self, image: &dyn core::ToInputArray, objects: &mut core::Vector::<core::Rect>, scale_factor: f64, min_neighbors: i32, flags: i32, min_size: core::Size, max_size: core::Size) -> Result<()> {
		input_array_arg!(image);
		unsafe { sys::cv_BaseCascadeClassifier_detectMultiScale_const__InputArrayR_vector_Rect_R_double_int_int_Size_Size(self.as_raw_mut_BaseCascadeClassifier(), image.as_raw__InputArray(), objects.as_raw_mut_VectorOfRect(), scale_factor, min_neighbors, flags, min_size.opencv_as_extern(), max_size.opencv_as_extern()) }.into_result()
	}
	/// Variant that also reports per-object detection counts; same contract as
	/// [`CascadeClassifierTrait::detect_multi_scale2`].
	fn detect_multi_scale_num(&mut self, image: &dyn core::ToInputArray, objects: &mut core::Vector::<core::Rect>, num_detections: &mut core::Vector::<i32>, scale_factor: f64, min_neighbors: i32, flags: i32, min_size: core::Size, max_size: core::Size) -> Result<()> {
		input_array_arg!(image);
		unsafe { sys::cv_BaseCascadeClassifier_detectMultiScale_const__InputArrayR_vector_Rect_R_vector_int_R_double_int_int_Size_Size(self.as_raw_mut_BaseCascadeClassifier(), image.as_raw__InputArray(), objects.as_raw_mut_VectorOfRect(), num_detections.as_raw_mut_VectorOfi32(), scale_factor, min_neighbors, flags, min_size.opencv_as_extern(), max_size.opencv_as_extern()) }.into_result()
	}
	/// Variant that can report reject levels / level weights; same contract as
	/// [`CascadeClassifierTrait::detect_multi_scale3`].
	fn detect_multi_scale_levels(&mut self, image: &dyn core::ToInputArray, objects: &mut core::Vector::<core::Rect>, reject_levels: &mut core::Vector::<i32>, level_weights: &mut core::Vector::<f64>, scale_factor: f64, min_neighbors: i32, flags: i32, min_size: core::Size, max_size: core::Size, output_reject_levels: bool) -> Result<()> {
		input_array_arg!(image);
		unsafe { sys::cv_BaseCascadeClassifier_detectMultiScale_const__InputArrayR_vector_Rect_R_vector_int_R_vector_double_R_double_int_int_Size_Size_bool(self.as_raw_mut_BaseCascadeClassifier(), image.as_raw__InputArray(), objects.as_raw_mut_VectorOfRect(), reject_levels.as_raw_mut_VectorOfi32(), level_weights.as_raw_mut_VectorOff64(), scale_factor, min_neighbors, flags, min_size.opencv_as_extern(), max_size.opencv_as_extern(), output_reject_levels) }.into_result()
	}
	/// `true` if the loaded model uses the old (haartraining) file format.
	fn is_old_format_cascade(&self) -> Result<bool> {
		unsafe { sys::cv_BaseCascadeClassifier_isOldFormatCascade_const(self.as_raw_BaseCascadeClassifier()) }.into_result()
	}
	/// Detection window size as reported by the underlying C++ object.
	fn get_original_window_size(&self) -> Result<core::Size> {
		unsafe { sys::cv_BaseCascadeClassifier_getOriginalWindowSize_const(self.as_raw_BaseCascadeClassifier()) }.into_result()
	}
	/// Feature-type identifier of the model (integer code from the C++ side).
	fn get_feature_type(&self) -> Result<i32> {
		unsafe { sys::cv_BaseCascadeClassifier_getFeatureType_const(self.as_raw_BaseCascadeClassifier()) }.into_result()
	}
	/// Raw pointer to the legacy cascade structure — opaque; caller must know the C layout.
	fn get_old_cascade(&mut self) -> Result<*mut c_void> {
		unsafe { sys::cv_BaseCascadeClassifier_getOldCascade(self.as_raw_mut_BaseCascadeClassifier()) }.into_result()
	}
	/// Installs a [`BaseCascadeClassifier_MaskGenerator`] on the classifier.
	fn set_mask_generator(&mut self, mask_generator: &core::Ptr::<dyn crate::objdetect::BaseCascadeClassifier_MaskGenerator>) -> Result<()> {
		unsafe { sys::cv_BaseCascadeClassifier_setMaskGenerator_const_Ptr_MaskGenerator_R(self.as_raw_mut_BaseCascadeClassifier(), mask_generator.as_raw_PtrOfBaseCascadeClassifier_MaskGenerator()) }.into_result()
	}
	/// Returns the currently installed mask generator.
	fn get_mask_generator(&mut self) -> Result<core::Ptr::<dyn crate::objdetect::BaseCascadeClassifier_MaskGenerator>> {
		unsafe { sys::cv_BaseCascadeClassifier_getMaskGenerator(self.as_raw_mut_BaseCascadeClassifier()) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::objdetect::BaseCascadeClassifier_MaskGenerator>::opencv_from_extern(r) } )
	}
}
/// Produces a mask restricting where a cascade classifier searches for objects
/// (see [`create_face_detection_mask_generator`] for the face-detection variant).
pub trait BaseCascadeClassifier_MaskGenerator {
	fn as_raw_BaseCascadeClassifier_MaskGenerator(&self) -> *const c_void;
	fn as_raw_mut_BaseCascadeClassifier_MaskGenerator(&mut self) -> *mut c_void;
	/// Computes and returns the mask for the given source image.
	fn generate_mask(&mut self, src: &core::Mat) -> Result<core::Mat> {
		unsafe { sys::cv_BaseCascadeClassifier_MaskGenerator_generateMask_const_MatR(self.as_raw_mut_BaseCascadeClassifier_MaskGenerator(), src.as_raw_Mat()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } )
	}
	/// One-time initialization hook; the C++ API leaves the argument unnamed.
	fn initialize_mask(&mut self, unnamed: &core::Mat) -> Result<()> {
		unsafe { sys::cv_BaseCascadeClassifier_MaskGenerator_initializeMask_const_MatR(self.as_raw_mut_BaseCascadeClassifier_MaskGenerator(), unnamed.as_raw_Mat()) }.into_result()
	}
}
/// Cascade classifier class for object detection.
pub trait CascadeClassifierTrait {
	fn as_raw_CascadeClassifier(&self) -> *const c_void;
	fn as_raw_mut_CascadeClassifier(&mut self) -> *mut c_void;
	/// Accessor for the underlying [`BaseCascadeClassifier`] implementation object.
	fn cc(&mut self) -> core::Ptr::<dyn crate::objdetect::BaseCascadeClassifier> {
		unsafe { sys::cv_CascadeClassifier_getPropCc(self.as_raw_mut_CascadeClassifier()) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::objdetect::BaseCascadeClassifier>::opencv_from_extern(r) } ).expect("Infallible function failed: cc")
	}
	/// Replaces the underlying [`BaseCascadeClassifier`] implementation object.
	fn set_cc(&mut self, mut val: core::Ptr::<dyn crate::objdetect::BaseCascadeClassifier>) -> () {
		unsafe { sys::cv_CascadeClassifier_setPropCc_Ptr_BaseCascadeClassifier_(self.as_raw_mut_CascadeClassifier(), val.as_raw_mut_PtrOfBaseCascadeClassifier()) }.into_result().expect("Infallible function failed: set_cc")
	}
	/// Checks whether the classifier has been loaded.
	fn empty(&self) -> Result<bool> {
		unsafe { sys::cv_CascadeClassifier_empty_const(self.as_raw_CascadeClassifier()) }.into_result()
	}
	/// Loads a classifier from a file.
	///
	/// ## Parameters
	/// * filename: Name of the file from which the classifier is loaded. The file may contain an old
	/// HAAR classifier trained by the haartraining application or a new cascade classifier trained by the
	/// traincascade application.
	fn load(&mut self, filename: &str) -> Result<bool> {
		// Macro shadows `filename` with an FFI-compatible string container.
		extern_container_arg!(filename);
		unsafe { sys::cv_CascadeClassifier_load_const_StringR(self.as_raw_mut_CascadeClassifier(), filename.opencv_as_extern()) }.into_result()
	}
	/// Reads a classifier from a FileStorage node.
	///
	///
	/// Note: The file may contain a new cascade classifier (trained traincascade application) only.
	fn read(&mut self, node: &core::FileNode) -> Result<bool> {
		unsafe { sys::cv_CascadeClassifier_read_const_FileNodeR(self.as_raw_mut_CascadeClassifier(), node.as_raw_FileNode()) }.into_result()
	}
	/// Detects objects of different sizes in the input image. The detected objects are returned as a list
	/// of rectangles.
	///
	/// ## Parameters
	/// * image: Matrix of the type CV_8U containing an image where objects are detected.
	/// * objects: Vector of rectangles where each rectangle contains the detected object, the
	/// rectangles may be partially outside the original image.
	/// * scaleFactor: Parameter specifying how much the image size is reduced at each image scale.
	/// * minNeighbors: Parameter specifying how many neighbors each candidate rectangle should have
	/// to retain it.
	/// * flags: Parameter with the same meaning for an old cascade as in the function
	/// cvHaarDetectObjects. It is not used for a new cascade.
	/// * minSize: Minimum possible object size. Objects smaller than that are ignored.
	/// * maxSize: Maximum possible object size. Objects larger than that are ignored. If `maxSize == minSize` model is evaluated on single scale.
	///
	/// The function is parallelized with the TBB library.
	///
	///
	/// Note:
	/// * (Python) A face detection example using cascade classifiers can be found at
	/// opencv_source_code/samples/python/facedetect.py
	///
	/// ## C++ default parameters
	/// * scale_factor: 1.1
	/// * min_neighbors: 3
	/// * flags: 0
	/// * min_size: Size()
	/// * max_size: Size()
	fn detect_multi_scale(&mut self, image: &dyn core::ToInputArray, objects: &mut core::Vector::<core::Rect>, scale_factor: f64, min_neighbors: i32, flags: i32, min_size: core::Size, max_size: core::Size) -> Result<()> {
		// Macro shadows `image` with an FFI-compatible input-array wrapper.
		input_array_arg!(image);
		unsafe { sys::cv_CascadeClassifier_detectMultiScale_const__InputArrayR_vector_Rect_R_double_int_int_Size_Size(self.as_raw_mut_CascadeClassifier(), image.as_raw__InputArray(), objects.as_raw_mut_VectorOfRect(), scale_factor, min_neighbors, flags, min_size.opencv_as_extern(), max_size.opencv_as_extern()) }.into_result()
	}
	/// Detects objects of different sizes in the input image. The detected objects are returned as a list
	/// of rectangles.
	///
	/// ## Parameters
	/// * image: Matrix of the type CV_8U containing an image where objects are detected.
	/// * objects: Vector of rectangles where each rectangle contains the detected object, the
	/// rectangles may be partially outside the original image.
	/// * scaleFactor: Parameter specifying how much the image size is reduced at each image scale.
	/// * minNeighbors: Parameter specifying how many neighbors each candidate rectangle should have
	/// to retain it.
	/// * flags: Parameter with the same meaning for an old cascade as in the function
	/// cvHaarDetectObjects. It is not used for a new cascade.
	/// * minSize: Minimum possible object size. Objects smaller than that are ignored.
	/// * maxSize: Maximum possible object size. Objects larger than that are ignored. If `maxSize == minSize` model is evaluated on single scale.
	///
	/// The function is parallelized with the TBB library.
	///
	///
	/// Note:
	/// * (Python) A face detection example using cascade classifiers can be found at
	/// opencv_source_code/samples/python/facedetect.py
	///
	/// ## Overloaded parameters
	///
	/// * image: Matrix of the type CV_8U containing an image where objects are detected.
	/// * objects: Vector of rectangles where each rectangle contains the detected object, the
	/// rectangles may be partially outside the original image.
	/// * numDetections: Vector of detection numbers for the corresponding objects. An object's number
	/// of detections is the number of neighboring positively classified rectangles that were joined
	/// together to form the object.
	/// * scaleFactor: Parameter specifying how much the image size is reduced at each image scale.
	/// * minNeighbors: Parameter specifying how many neighbors each candidate rectangle should have
	/// to retain it.
	/// * flags: Parameter with the same meaning for an old cascade as in the function
	/// cvHaarDetectObjects. It is not used for a new cascade.
	/// * minSize: Minimum possible object size. Objects smaller than that are ignored.
	/// * maxSize: Maximum possible object size. Objects larger than that are ignored. If `maxSize == minSize` model is evaluated on single scale.
	///
	/// ## C++ default parameters
	/// * scale_factor: 1.1
	/// * min_neighbors: 3
	/// * flags: 0
	/// * min_size: Size()
	/// * max_size: Size()
	fn detect_multi_scale2(&mut self, image: &dyn core::ToInputArray, objects: &mut core::Vector::<core::Rect>, num_detections: &mut core::Vector::<i32>, scale_factor: f64, min_neighbors: i32, flags: i32, min_size: core::Size, max_size: core::Size) -> Result<()> {
		input_array_arg!(image);
		unsafe { sys::cv_CascadeClassifier_detectMultiScale_const__InputArrayR_vector_Rect_R_vector_int_R_double_int_int_Size_Size(self.as_raw_mut_CascadeClassifier(), image.as_raw__InputArray(), objects.as_raw_mut_VectorOfRect(), num_detections.as_raw_mut_VectorOfi32(), scale_factor, min_neighbors, flags, min_size.opencv_as_extern(), max_size.opencv_as_extern()) }.into_result()
	}
	/// Detects objects of different sizes in the input image. The detected objects are returned as a list
	/// of rectangles.
	///
	/// ## Parameters
	/// * image: Matrix of the type CV_8U containing an image where objects are detected.
	/// * objects: Vector of rectangles where each rectangle contains the detected object, the
	/// rectangles may be partially outside the original image.
	/// * scaleFactor: Parameter specifying how much the image size is reduced at each image scale.
	/// * minNeighbors: Parameter specifying how many neighbors each candidate rectangle should have
	/// to retain it.
	/// * flags: Parameter with the same meaning for an old cascade as in the function
	/// cvHaarDetectObjects. It is not used for a new cascade.
	/// * minSize: Minimum possible object size. Objects smaller than that are ignored.
	/// * maxSize: Maximum possible object size. Objects larger than that are ignored. If `maxSize == minSize` model is evaluated on single scale.
	///
	/// The function is parallelized with the TBB library.
	///
	///
	/// Note:
	/// * (Python) A face detection example using cascade classifiers can be found at
	/// opencv_source_code/samples/python/facedetect.py
	///
	/// ## Overloaded parameters
	///
	/// if `outputRejectLevels` is `true` returns `rejectLevels` and `levelWeights`
	///
	/// ## C++ default parameters
	/// * scale_factor: 1.1
	/// * min_neighbors: 3
	/// * flags: 0
	/// * min_size: Size()
	/// * max_size: Size()
	/// * output_reject_levels: false
	fn detect_multi_scale3(&mut self, image: &dyn core::ToInputArray, objects: &mut core::Vector::<core::Rect>, reject_levels: &mut core::Vector::<i32>, level_weights: &mut core::Vector::<f64>, scale_factor: f64, min_neighbors: i32, flags: i32, min_size: core::Size, max_size: core::Size, output_reject_levels: bool) -> Result<()> {
		input_array_arg!(image);
		unsafe { sys::cv_CascadeClassifier_detectMultiScale_const__InputArrayR_vector_Rect_R_vector_int_R_vector_double_R_double_int_int_Size_Size_bool(self.as_raw_mut_CascadeClassifier(), image.as_raw__InputArray(), objects.as_raw_mut_VectorOfRect(), reject_levels.as_raw_mut_VectorOfi32(), level_weights.as_raw_mut_VectorOff64(), scale_factor, min_neighbors, flags, min_size.opencv_as_extern(), max_size.opencv_as_extern(), output_reject_levels) }.into_result()
	}
	/// `true` if the loaded model uses the old (haartraining) file format.
	fn is_old_format_cascade(&self) -> Result<bool> {
		unsafe { sys::cv_CascadeClassifier_isOldFormatCascade_const(self.as_raw_CascadeClassifier()) }.into_result()
	}
	/// Detection window size as reported by the underlying C++ object.
	fn get_original_window_size(&self) -> Result<core::Size> {
		unsafe { sys::cv_CascadeClassifier_getOriginalWindowSize_const(self.as_raw_CascadeClassifier()) }.into_result()
	}
	/// Feature-type identifier of the model (integer code from the C++ side).
	fn get_feature_type(&self) -> Result<i32> {
		unsafe { sys::cv_CascadeClassifier_getFeatureType_const(self.as_raw_CascadeClassifier()) }.into_result()
	}
	/// Raw pointer to the legacy cascade structure — opaque; caller must know the C layout.
	fn get_old_cascade(&mut self) -> Result<*mut c_void> {
		unsafe { sys::cv_CascadeClassifier_getOldCascade(self.as_raw_mut_CascadeClassifier()) }.into_result()
	}
	/// Installs a [`BaseCascadeClassifier_MaskGenerator`] on the classifier.
	fn set_mask_generator(&mut self, mask_generator: &core::Ptr::<dyn crate::objdetect::BaseCascadeClassifier_MaskGenerator>) -> Result<()> {
		unsafe { sys::cv_CascadeClassifier_setMaskGenerator_const_Ptr_MaskGenerator_R(self.as_raw_mut_CascadeClassifier(), mask_generator.as_raw_PtrOfBaseCascadeClassifier_MaskGenerator()) }.into_result()
	}
	/// Returns the currently installed mask generator.
	fn get_mask_generator(&mut self) -> Result<core::Ptr::<dyn crate::objdetect::BaseCascadeClassifier_MaskGenerator>> {
		unsafe { sys::cv_CascadeClassifier_getMaskGenerator(self.as_raw_mut_CascadeClassifier()) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::objdetect::BaseCascadeClassifier_MaskGenerator>::opencv_from_extern(r) } )
	}
}
/// Cascade classifier class for object detection.
///
/// Owned wrapper around the C++ `cv::CascadeClassifier`; the underlying object
/// is released via the extern delete function in `Drop`.
pub struct CascadeClassifier {
	ptr: *mut c_void  // owning pointer to the C++ object; freed in Drop
}
opencv_type_boxed! { CascadeClassifier }
impl Drop for CascadeClassifier {
	fn drop(&mut self) {
		extern "C" { fn cv_CascadeClassifier_delete(instance: *mut c_void); }
		// Drop runs at most once, so the C++ object is deleted exactly once.
		unsafe { cv_CascadeClassifier_delete(self.as_raw_mut_CascadeClassifier()) };
	}
}
impl CascadeClassifier {
	// Raw-pointer accessors used by the generated FFI calls.
	#[inline] pub fn as_raw_CascadeClassifier(&self) -> *const c_void { self.as_raw() }
	#[inline] pub fn as_raw_mut_CascadeClassifier(&mut self) -> *mut c_void { self.as_raw_mut() }
}
// NOTE(review): Send is asserted by the binding generator; soundness relies on
// the C++ object being safely movable across threads — not verifiable from this file.
unsafe impl Send for CascadeClassifier {}
impl crate::objdetect::CascadeClassifierTrait for CascadeClassifier {
	#[inline] fn as_raw_CascadeClassifier(&self) -> *const c_void { self.as_raw() }
	#[inline] fn as_raw_mut_CascadeClassifier(&mut self) -> *mut c_void { self.as_raw_mut() }
}
impl CascadeClassifier {
	/// Constructs an empty classifier; load a model with
	/// [`CascadeClassifierTrait::load`] before detecting.
	pub fn default() -> Result<crate::objdetect::CascadeClassifier> {
		let raw = unsafe { sys::cv_CascadeClassifier_CascadeClassifier() }.into_result()?;
		Ok(unsafe { crate::objdetect::CascadeClassifier::opencv_from_extern(raw) })
	}
	/// Loads a classifier from a file.
	///
	/// ## Parameters
	/// * filename: Name of the file from which the classifier is loaded.
	pub fn new(filename: &str) -> Result<crate::objdetect::CascadeClassifier> {
		// Macro shadows `filename` with an FFI-compatible string container.
		extern_container_arg!(filename);
		let raw = unsafe { sys::cv_CascadeClassifier_CascadeClassifier_const_StringR(filename.opencv_as_extern()) }.into_result()?;
		Ok(unsafe { crate::objdetect::CascadeClassifier::opencv_from_extern(raw) })
	}
	/// Converts the cascade file `oldcascade` into `newcascade`
	/// (wraps the static C++ `CascadeClassifier::convert`).
	pub fn convert(oldcascade: &str, newcascade: &str) -> Result<bool> {
		extern_container_arg!(oldcascade);
		extern_container_arg!(newcascade);
		unsafe { sys::cv_CascadeClassifier_convert_const_StringR_const_StringR(oldcascade.opencv_as_extern(), newcascade.opencv_as_extern()) }.into_result()
	}
}
/// Operations of the C++ `cv::DetectionBasedTracker`, which combines a detector
/// with per-object tracking (see [`DetectionBasedTracker::new`] for construction).
pub trait DetectionBasedTrackerTrait {
	fn as_raw_DetectionBasedTracker(&self) -> *const c_void;
	fn as_raw_mut_DetectionBasedTracker(&mut self) -> *mut c_void;
	/// Starts the tracker (C++ `run`); returns `false` on failure per the C++ API — confirm upstream.
	fn run(&mut self) -> Result<bool> {
		unsafe { sys::cv_DetectionBasedTracker_run(self.as_raw_mut_DetectionBasedTracker()) }.into_result()
	}
	/// Stops the tracker (C++ `stop`).
	fn stop(&mut self) -> Result<()> {
		unsafe { sys::cv_DetectionBasedTracker_stop(self.as_raw_mut_DetectionBasedTracker()) }.into_result()
	}
	/// Discards the current tracking state (C++ `resetTracking`).
	fn reset_tracking(&mut self) -> Result<()> {
		unsafe { sys::cv_DetectionBasedTracker_resetTracking(self.as_raw_mut_DetectionBasedTracker()) }.into_result()
	}
	/// Feeds one frame into the tracker; parameter name indicates a grayscale image is expected.
	fn process(&mut self, image_gray: &core::Mat) -> Result<()> {
		unsafe { sys::cv_DetectionBasedTracker_process_const_MatR(self.as_raw_mut_DetectionBasedTracker(), image_gray.as_raw_Mat()) }.into_result()
	}
	/// Replaces the tracker parameters; returns `true` on success.
	fn set_parameters(&mut self, params: &crate::objdetect::DetectionBasedTracker_Parameters) -> Result<bool> {
		unsafe { sys::cv_DetectionBasedTracker_setParameters_const_ParametersR(self.as_raw_mut_DetectionBasedTracker(), params.as_raw_DetectionBasedTracker_Parameters()) }.into_result()
	}
	/// Returns a copy of the current tracker parameters.
	fn get_parameters(&self) -> Result<crate::objdetect::DetectionBasedTracker_Parameters> {
		unsafe { sys::cv_DetectionBasedTracker_getParameters_const(self.as_raw_DetectionBasedTracker()) }.into_result().map(|r| unsafe { crate::objdetect::DetectionBasedTracker_Parameters::opencv_from_extern(r) } )
	}
	/// Fills `result` with the bounding rectangles of the currently tracked objects.
	fn get_objects(&self, result: &mut core::Vector::<core::Rect>) -> Result<()> {
		unsafe { sys::cv_DetectionBasedTracker_getObjects_const_vector_Rect_R(self.as_raw_DetectionBasedTracker(), result.as_raw_mut_VectorOfRect()) }.into_result()
	}
	/// Overload returning extended per-object info (id, location, status).
	fn get_objects_1(&self, result: &mut core::Vector::<crate::objdetect::DetectionBasedTracker_ExtObject>) -> Result<()> {
		unsafe { sys::cv_DetectionBasedTracker_getObjects_const_vector_ExtObject_R(self.as_raw_DetectionBasedTracker(), result.as_raw_mut_VectorOfDetectionBasedTracker_ExtObject()) }.into_result()
	}
	/// Registers a new object at `location`; returns its id (i32 from the C++ side).
	fn add_object(&mut self, location: core::Rect) -> Result<i32> {
		// Passed by reference to match the C++ `const Rect&` signature.
		unsafe { sys::cv_DetectionBasedTracker_addObject_const_RectR(self.as_raw_mut_DetectionBasedTracker(), &location) }.into_result()
	}
}
/// Owned wrapper around the C++ `cv::DetectionBasedTracker` (see [`DetectionBasedTrackerTrait`]).
pub struct DetectionBasedTracker {
	ptr: *mut c_void  // owning pointer to the C++ object; freed in Drop
}
opencv_type_boxed! { DetectionBasedTracker }
impl Drop for DetectionBasedTracker {
	fn drop(&mut self) {
		extern "C" { fn cv_DetectionBasedTracker_delete(instance: *mut c_void); }
		// Releases the underlying C++ object owned by this wrapper.
		unsafe { cv_DetectionBasedTracker_delete(self.as_raw_mut_DetectionBasedTracker()) };
	}
}
impl DetectionBasedTracker {
	// Raw-pointer accessors used by the generated FFI calls.
	#[inline] pub fn as_raw_DetectionBasedTracker(&self) -> *const c_void { self.as_raw() }
	#[inline] pub fn as_raw_mut_DetectionBasedTracker(&mut self) -> *mut c_void { self.as_raw_mut() }
}
// NOTE(review): Send is asserted by the binding generator; not verifiable from this file.
unsafe impl Send for DetectionBasedTracker {}
impl crate::objdetect::DetectionBasedTrackerTrait for DetectionBasedTracker {
	#[inline] fn as_raw_DetectionBasedTracker(&self) -> *const c_void { self.as_raw() }
	#[inline] fn as_raw_mut_DetectionBasedTracker(&mut self) -> *mut c_void { self.as_raw_mut() }
}
impl DetectionBasedTracker {
	/// Builds a tracker from two detectors and a parameter set.
	/// Both detector `Ptr`s are taken by value and consumed by the C++ constructor.
	pub fn new(mut main_detector: core::Ptr::<dyn crate::objdetect::DetectionBasedTracker_IDetector>, mut tracking_detector: core::Ptr::<dyn crate::objdetect::DetectionBasedTracker_IDetector>, params: &crate::objdetect::DetectionBasedTracker_Parameters) -> Result<crate::objdetect::DetectionBasedTracker> {
		unsafe { sys::cv_DetectionBasedTracker_DetectionBasedTracker_Ptr_IDetector__Ptr_IDetector__const_ParametersR(main_detector.as_raw_mut_PtrOfDetectionBasedTracker_IDetector(), tracking_detector.as_raw_mut_PtrOfDetectionBasedTracker_IDetector(), params.as_raw_DetectionBasedTracker_Parameters()) }.into_result().map(|r| unsafe { crate::objdetect::DetectionBasedTracker::opencv_from_extern(r) } )
	}
}
/// Property accessors for a tracked object's extended info: numeric id,
/// bounding rectangle, and [`DetectionBasedTracker_ObjectStatus`].
pub trait DetectionBasedTracker_ExtObjectTrait {
	fn as_raw_DetectionBasedTracker_ExtObject(&self) -> *const c_void;
	fn as_raw_mut_DetectionBasedTracker_ExtObject(&mut self) -> *mut c_void;
	/// Numeric identifier of the tracked object.
	fn id(&self) -> i32 {
		unsafe { sys::cv_DetectionBasedTracker_ExtObject_getPropId_const(self.as_raw_DetectionBasedTracker_ExtObject()) }.into_result().expect("Infallible function failed: id")
	}
	fn set_id(&mut self, val: i32) -> () {
		unsafe { sys::cv_DetectionBasedTracker_ExtObject_setPropId_int(self.as_raw_mut_DetectionBasedTracker_ExtObject(), val) }.into_result().expect("Infallible function failed: set_id")
	}
	/// Bounding rectangle of the tracked object.
	fn location(&self) -> core::Rect {
		unsafe { sys::cv_DetectionBasedTracker_ExtObject_getPropLocation_const(self.as_raw_DetectionBasedTracker_ExtObject()) }.into_result().expect("Infallible function failed: location")
	}
	fn set_location(&mut self, val: core::Rect) -> () {
		unsafe { sys::cv_DetectionBasedTracker_ExtObject_setPropLocation_Rect(self.as_raw_mut_DetectionBasedTracker_ExtObject(), val.opencv_as_extern()) }.into_result().expect("Infallible function failed: set_location")
	}
	/// Current tracking status of the object.
	fn status(&self) -> crate::objdetect::DetectionBasedTracker_ObjectStatus {
		unsafe { sys::cv_DetectionBasedTracker_ExtObject_getPropStatus_const(self.as_raw_DetectionBasedTracker_ExtObject()) }.into_result().expect("Infallible function failed: status")
	}
	fn set_status(&mut self, val: crate::objdetect::DetectionBasedTracker_ObjectStatus) -> () {
		unsafe { sys::cv_DetectionBasedTracker_ExtObject_setPropStatus_ObjectStatus(self.as_raw_mut_DetectionBasedTracker_ExtObject(), val) }.into_result().expect("Infallible function failed: set_status")
	}
}
/// Owned wrapper around the C++ `DetectionBasedTracker::ExtObject`
/// (see [`DetectionBasedTracker_ExtObjectTrait`] for the accessors).
pub struct DetectionBasedTracker_ExtObject {
	ptr: *mut c_void  // owning pointer to the C++ object; freed in Drop
}
opencv_type_boxed! { DetectionBasedTracker_ExtObject }
impl Drop for DetectionBasedTracker_ExtObject {
	fn drop(&mut self) {
		extern "C" { fn cv_DetectionBasedTracker_ExtObject_delete(instance: *mut c_void); }
		// Releases the underlying C++ object owned by this wrapper.
		unsafe { cv_DetectionBasedTracker_ExtObject_delete(self.as_raw_mut_DetectionBasedTracker_ExtObject()) };
	}
}
impl DetectionBasedTracker_ExtObject {
	// Raw-pointer accessors used by the generated FFI calls.
	#[inline] pub fn as_raw_DetectionBasedTracker_ExtObject(&self) -> *const c_void { self.as_raw() }
	#[inline] pub fn as_raw_mut_DetectionBasedTracker_ExtObject(&mut self) -> *mut c_void { self.as_raw_mut() }
}
// NOTE(review): Send is asserted by the binding generator; not verifiable from this file.
unsafe impl Send for DetectionBasedTracker_ExtObject {}
impl crate::objdetect::DetectionBasedTracker_ExtObjectTrait for DetectionBasedTracker_ExtObject {
	#[inline] fn as_raw_DetectionBasedTracker_ExtObject(&self) -> *const c_void { self.as_raw() }
	#[inline] fn as_raw_mut_DetectionBasedTracker_ExtObject(&mut self) -> *mut c_void { self.as_raw_mut() }
}
impl DetectionBasedTracker_ExtObject {
	/// Constructs an ExtObject from its id, bounding rectangle, and status.
	pub fn new(_id: i32, _location: core::Rect, _status: crate::objdetect::DetectionBasedTracker_ObjectStatus) -> Result<crate::objdetect::DetectionBasedTracker_ExtObject> {
		unsafe { sys::cv_DetectionBasedTracker_ExtObject_ExtObject_int_Rect_ObjectStatus(_id, _location.opencv_as_extern(), _status) }.into_result().map(|r| unsafe { crate::objdetect::DetectionBasedTracker_ExtObject::opencv_from_extern(r) } )
	}
}
/// Detector interface plugged into [`DetectionBasedTracker`]: produces object
/// rectangles for a frame and exposes detection tuning knobs.
pub trait DetectionBasedTracker_IDetector {
	fn as_raw_DetectionBasedTracker_IDetector(&self) -> *const c_void;
	fn as_raw_mut_DetectionBasedTracker_IDetector(&mut self) -> *mut c_void;
	/// Runs detection on `image`, writing the found rectangles into `objects`.
	fn detect(&mut self, image: &core::Mat, objects: &mut core::Vector::<core::Rect>) -> Result<()> {
		unsafe { sys::cv_DetectionBasedTracker_IDetector_detect_const_MatR_vector_Rect_R(self.as_raw_mut_DetectionBasedTracker_IDetector(), image.as_raw_Mat(), objects.as_raw_mut_VectorOfRect()) }.into_result()
	}
	/// Sets the smallest object size the detector will report.
	fn set_min_object_size(&mut self, min: core::Size) -> Result<()> {
		// Passed by reference to match the C++ `const Size&` signature.
		unsafe { sys::cv_DetectionBasedTracker_IDetector_setMinObjectSize_const_SizeR(self.as_raw_mut_DetectionBasedTracker_IDetector(), &min) }.into_result()
	}
	/// Sets the largest object size the detector will report.
	fn set_max_object_size(&mut self, max: core::Size) -> Result<()> {
		unsafe { sys::cv_DetectionBasedTracker_IDetector_setMaxObjectSize_const_SizeR(self.as_raw_mut_DetectionBasedTracker_IDetector(), &max) }.into_result()
	}
	fn get_min_object_size(&self) -> Result<core::Size> {
		unsafe { sys::cv_DetectionBasedTracker_IDetector_getMinObjectSize_const(self.as_raw_DetectionBasedTracker_IDetector()) }.into_result()
	}
	fn get_max_object_size(&self) -> Result<core::Size> {
		unsafe { sys::cv_DetectionBasedTracker_IDetector_getMaxObjectSize_const(self.as_raw_DetectionBasedTracker_IDetector()) }.into_result()
	}
	/// Scale factor / min-neighbours accessors mirror the cascade-detection
	/// parameters of the same names (see `detect_multi_scale`).
	fn get_scale_factor(&mut self) -> Result<f32> {
		unsafe { sys::cv_DetectionBasedTracker_IDetector_getScaleFactor(self.as_raw_mut_DetectionBasedTracker_IDetector()) }.into_result()
	}
	fn set_scale_factor(&mut self, value: f32) -> Result<()> {
		unsafe { sys::cv_DetectionBasedTracker_IDetector_setScaleFactor_float(self.as_raw_mut_DetectionBasedTracker_IDetector(), value) }.into_result()
	}
	fn get_min_neighbours(&mut self) -> Result<i32> {
		unsafe { sys::cv_DetectionBasedTracker_IDetector_getMinNeighbours(self.as_raw_mut_DetectionBasedTracker_IDetector()) }.into_result()
	}
	fn set_min_neighbours(&mut self, value: i32) -> Result<()> {
		unsafe { sys::cv_DetectionBasedTracker_IDetector_setMinNeighbours_int(self.as_raw_mut_DetectionBasedTracker_IDetector(), value) }.into_result()
	}
}
/// Property accessors for [`DetectionBasedTracker_Parameters`].
/// Units are not stated in this file — presumably frames; confirm against the C++ docs.
pub trait DetectionBasedTracker_ParametersTrait {
	fn as_raw_DetectionBasedTracker_Parameters(&self) -> *const c_void;
	fn as_raw_mut_DetectionBasedTracker_Parameters(&mut self) -> *mut c_void;
	/// How long a lost object is kept alive before being dropped.
	fn max_track_lifetime(&self) -> i32 {
		unsafe { sys::cv_DetectionBasedTracker_Parameters_getPropMaxTrackLifetime_const(self.as_raw_DetectionBasedTracker_Parameters()) }.into_result().expect("Infallible function failed: max_track_lifetime")
	}
	fn set_max_track_lifetime(&mut self, val: i32) -> () {
		unsafe { sys::cv_DetectionBasedTracker_Parameters_setPropMaxTrackLifetime_int(self.as_raw_mut_DetectionBasedTracker_Parameters(), val) }.into_result().expect("Infallible function failed: set_max_track_lifetime")
	}
	/// Minimum interval between full detection passes.
	fn min_detection_period(&self) -> i32 {
		unsafe { sys::cv_DetectionBasedTracker_Parameters_getPropMinDetectionPeriod_const(self.as_raw_DetectionBasedTracker_Parameters()) }.into_result().expect("Infallible function failed: min_detection_period")
	}
	fn set_min_detection_period(&mut self, val: i32) -> () {
		unsafe { sys::cv_DetectionBasedTracker_Parameters_setPropMinDetectionPeriod_int(self.as_raw_mut_DetectionBasedTracker_Parameters(), val) }.into_result().expect("Infallible function failed: set_min_detection_period")
	}
}
pub struct DetectionBasedTracker_Parameters {
ptr: *mut c_void
}
opencv_type_boxed! { DetectionBasedTracker_Parameters }
impl Drop for DetectionBasedTracker_Parameters {
fn drop(&mut self) {
extern "C" { fn cv_DetectionBasedTracker_Parameters_delete(instance: *mut c_void); }
unsafe { cv_DetectionBasedTracker_Parameters_delete(self.as_raw_mut_DetectionBasedTracker_Parameters()) };
}
}
impl DetectionBasedTracker_Parameters {
#[inline] pub fn as_raw_DetectionBasedTracker_Parameters(&self) -> *const c_void { self.as_raw() }
#[inline] pub fn as_raw_mut_DetectionBasedTracker_Parameters(&mut self) -> *mut c_void { self.as_raw_mut() }
}
unsafe impl Send for DetectionBasedTracker_Parameters {}
impl crate::objdetect::DetectionBasedTracker_ParametersTrait for DetectionBasedTracker_Parameters {
#[inline] fn as_raw_DetectionBasedTracker_Parameters(&self) -> *const c_void { self.as_raw() }
#[inline] fn as_raw_mut_DetectionBasedTracker_Parameters(&mut self) -> *mut c_void { self.as_raw_mut() }
}
impl DetectionBasedTracker_Parameters {
pub fn default() -> Result<crate::objdetect::DetectionBasedTracker_Parameters> {
unsafe { sys::cv_DetectionBasedTracker_Parameters_Parameters() }.into_result().map(|r| unsafe { crate::objdetect::DetectionBasedTracker_Parameters::opencv_from_extern(r) } )
}
}
/// struct for detection region of interest (ROI)
pub trait DetectionROITrait {
fn as_raw_DetectionROI(&self) -> *const c_void;
fn as_raw_mut_DetectionROI(&mut self) -> *mut c_void;
/// scale(size) of the bounding box
fn scale(&self) -> f64 {
unsafe { sys::cv_DetectionROI_getPropScale_const(self.as_raw_DetectionROI()) }.into_result().expect("Infallible function failed: scale")
}
/// scale(size) of the bounding box
fn set_scale(&mut self, val: f64) -> () {
unsafe { sys::cv_DetectionROI_setPropScale_double(self.as_raw_mut_DetectionROI(), val) }.into_result().expect("Infallible function failed: set_scale")
}
/// set of requrested locations to be evaluated
fn locations(&mut self) -> core::Vector::<core::Point> {
unsafe { sys::cv_DetectionROI_getPropLocations(self.as_raw_mut_DetectionROI()) }.into_result().map(|r| unsafe { core::Vector::<core::Point>::opencv_from_extern(r) } ).expect("Infallible function failed: locations")
}
/// set of requrested locations to be evaluated
fn set_locations(&mut self, mut val: core::Vector::<core::Point>) -> () {
unsafe { sys::cv_DetectionROI_setPropLocations_vector_Point_(self.as_raw_mut_DetectionROI(), val.as_raw_mut_VectorOfPoint()) }.into_result().expect("Infallible function failed: set_locations")
}
/// vector that will contain confidence values for each location
fn confidences(&mut self) -> core::Vector::<f64> {
unsafe { sys::cv_DetectionROI_getPropConfidences(self.as_raw_mut_DetectionROI()) }.into_result().map(|r| unsafe { core::Vector::<f64>::opencv_from_extern(r) } ).expect("Infallible function failed: confidences")
}
/// vector that will contain confidence values for each location
fn set_confidences(&mut self, mut val: core::Vector::<f64>) -> () {
unsafe { sys::cv_DetectionROI_setPropConfidences_vector_double_(self.as_raw_mut_DetectionROI(), val.as_raw_mut_VectorOff64()) }.into_result().expect("Infallible function failed: set_confidences")
}
}
/// struct for detection region of interest (ROI)
pub struct DetectionROI {
ptr: *mut c_void
}
opencv_type_boxed! { DetectionROI }
impl Drop for DetectionROI {
fn drop(&mut self) {
extern "C" { fn cv_DetectionROI_delete(instance: *mut c_void); }
unsafe { cv_DetectionROI_delete(self.as_raw_mut_DetectionROI()) };
}
}
impl DetectionROI {
#[inline] pub fn as_raw_DetectionROI(&self) -> *const c_void { self.as_raw() }
#[inline] pub fn as_raw_mut_DetectionROI(&mut self) -> *mut c_void { self.as_raw_mut() }
}
unsafe impl Send for DetectionROI {}
impl crate::objdetect::DetectionROITrait for DetectionROI {
#[inline] fn as_raw_DetectionROI(&self) -> *const c_void { self.as_raw() }
#[inline] fn as_raw_mut_DetectionROI(&mut self) -> *mut c_void { self.as_raw_mut() }
}
impl DetectionROI {
}
pub trait HOGDescriptorTrait {
fn as_raw_HOGDescriptor(&self) -> *const c_void;
fn as_raw_mut_HOGDescriptor(&mut self) -> *mut c_void;
fn win_size(&self) -> core::Size {
unsafe { sys::cv_HOGDescriptor_getPropWinSize_const(self.as_raw_HOGDescriptor()) }.into_result().expect("Infallible function failed: win_size")
}
fn set_win_size(&mut self, val: core::Size) -> () {
unsafe { sys::cv_HOGDescriptor_setPropWinSize_Size(self.as_raw_mut_HOGDescriptor(), val.opencv_as_extern()) }.into_result().expect("Infallible function failed: set_win_size")
}
fn block_size(&self) -> core::Size {
unsafe { sys::cv_HOGDescriptor_getPropBlockSize_const(self.as_raw_HOGDescriptor()) }.into_result().expect("Infallible function failed: block_size")
}
fn set_block_size(&mut self, val: core::Size) -> () {
unsafe { sys::cv_HOGDescriptor_setPropBlockSize_Size(self.as_raw_mut_HOGDescriptor(), val.opencv_as_extern()) }.into_result().expect("Infallible function failed: set_block_size")
}
fn block_stride(&self) -> core::Size {
unsafe { sys::cv_HOGDescriptor_getPropBlockStride_const(self.as_raw_HOGDescriptor()) }.into_result().expect("Infallible function failed: block_stride")
}
fn set_block_stride(&mut self, val: core::Size) -> () {
unsafe { sys::cv_HOGDescriptor_setPropBlockStride_Size(self.as_raw_mut_HOGDescriptor(), val.opencv_as_extern()) }.into_result().expect("Infallible function failed: set_block_stride")
}
fn cell_size(&self) -> core::Size {
unsafe { sys::cv_HOGDescriptor_getPropCellSize_const(self.as_raw_HOGDescriptor()) }.into_result().expect("Infallible function failed: cell_size")
}
fn set_cell_size(&mut self, val: core::Size) -> () {
unsafe { sys::cv_HOGDescriptor_setPropCellSize_Size(self.as_raw_mut_HOGDescriptor(), val.opencv_as_extern()) }.into_result().expect("Infallible function failed: set_cell_size")
}
fn nbins(&self) -> i32 {
unsafe { sys::cv_HOGDescriptor_getPropNbins_const(self.as_raw_HOGDescriptor()) }.into_result().expect("Infallible function failed: nbins")
}
fn set_nbins(&mut self, val: i32) -> () {
unsafe { sys::cv_HOGDescriptor_setPropNbins_int(self.as_raw_mut_HOGDescriptor(), val) }.into_result().expect("Infallible function failed: set_nbins")
}
fn deriv_aperture(&self) -> i32 {
unsafe { sys::cv_HOGDescriptor_getPropDerivAperture_const(self.as_raw_HOGDescriptor()) }.into_result().expect("Infallible function failed: deriv_aperture")
}
fn set_deriv_aperture(&mut self, val: i32) -> () {
unsafe { sys::cv_HOGDescriptor_setPropDerivAperture_int(self.as_raw_mut_HOGDescriptor(), val) }.into_result().expect("Infallible function failed: set_deriv_aperture")
}
fn win_sigma(&self) -> f64 {
unsafe { sys::cv_HOGDescriptor_getPropWinSigma_const(self.as_raw_HOGDescriptor()) }.into_result().expect("Infallible function failed: win_sigma")
}
fn set_win_sigma(&mut self, val: f64) -> () {
unsafe { sys::cv_HOGDescriptor_setPropWinSigma_double(self.as_raw_mut_HOGDescriptor(), val) }.into_result().expect("Infallible function failed: set_win_sigma")
}
fn histogram_norm_type(&self) -> i32 {
unsafe { sys::cv_HOGDescriptor_getPropHistogramNormType_const(self.as_raw_HOGDescriptor()) }.into_result().expect("Infallible function failed: histogram_norm_type")
}
fn set_histogram_norm_type(&mut self, val: i32) -> () {
unsafe { sys::cv_HOGDescriptor_setPropHistogramNormType_int(self.as_raw_mut_HOGDescriptor(), val) }.into_result().expect("Infallible function failed: set_histogram_norm_type")
}
fn l2_hys_threshold(&self) -> f64 {
unsafe { sys::cv_HOGDescriptor_getPropL2HysThreshold_const(self.as_raw_HOGDescriptor()) }.into_result().expect("Infallible function failed: l2_hys_threshold")
}
fn set_l2_hys_threshold(&mut self, val: f64) -> () {
unsafe { sys::cv_HOGDescriptor_setPropL2HysThreshold_double(self.as_raw_mut_HOGDescriptor(), val) }.into_result().expect("Infallible function failed: set_l2_hys_threshold")
}
fn gamma_correction(&self) -> bool {
unsafe { sys::cv_HOGDescriptor_getPropGammaCorrection_const(self.as_raw_HOGDescriptor()) }.into_result().expect("Infallible function failed: gamma_correction")
}
fn set_gamma_correction(&mut self, val: bool) -> () {
unsafe { sys::cv_HOGDescriptor_setPropGammaCorrection_bool(self.as_raw_mut_HOGDescriptor(), val) }.into_result().expect("Infallible function failed: set_gamma_correction")
}
fn svm_detector(&mut self) -> core::Vector::<f32> {
unsafe { sys::cv_HOGDescriptor_getPropSvmDetector(self.as_raw_mut_HOGDescriptor()) }.into_result().map(|r| unsafe { core::Vector::<f32>::opencv_from_extern(r) } ).expect("Infallible function failed: svm_detector")
}
fn set_svm_detector_vec(&mut self, mut val: core::Vector::<f32>) -> () {
unsafe { sys::cv_HOGDescriptor_setPropSvmDetector_vector_float_(self.as_raw_mut_HOGDescriptor(), val.as_raw_mut_VectorOff32()) }.into_result().expect("Infallible function failed: set_svm_detector_vec")
}
fn ocl_svm_detector(&mut self) -> core::UMat {
unsafe { sys::cv_HOGDescriptor_getPropOclSvmDetector(self.as_raw_mut_HOGDescriptor()) }.into_result().map(|r| unsafe { core::UMat::opencv_from_extern(r) } ).expect("Infallible function failed: ocl_svm_detector")
}
fn set_ocl_svm_detector(&mut self, mut val: core::UMat) -> () {
unsafe { sys::cv_HOGDescriptor_setPropOclSvmDetector_UMat(self.as_raw_mut_HOGDescriptor(), val.as_raw_mut_UMat()) }.into_result().expect("Infallible function failed: set_ocl_svm_detector")
}
fn free_coef(&self) -> f32 {
unsafe { sys::cv_HOGDescriptor_getPropFree_coef_const(self.as_raw_HOGDescriptor()) }.into_result().expect("Infallible function failed: free_coef")
}
fn set_free_coef(&mut self, val: f32) -> () {
unsafe { sys::cv_HOGDescriptor_setPropFree_coef_float(self.as_raw_mut_HOGDescriptor(), val) }.into_result().expect("Infallible function failed: set_free_coef")
}
fn nlevels(&self) -> i32 {
unsafe { sys::cv_HOGDescriptor_getPropNlevels_const(self.as_raw_HOGDescriptor()) }.into_result().expect("Infallible function failed: nlevels")
}
fn set_nlevels(&mut self, val: i32) -> () {
unsafe { sys::cv_HOGDescriptor_setPropNlevels_int(self.as_raw_mut_HOGDescriptor(), val) }.into_result().expect("Infallible function failed: set_nlevels")
}
fn signed_gradient(&self) -> bool {
unsafe { sys::cv_HOGDescriptor_getPropSignedGradient_const(self.as_raw_HOGDescriptor()) }.into_result().expect("Infallible function failed: signed_gradient")
}
fn set_signed_gradient(&mut self, val: bool) -> () {
unsafe { sys::cv_HOGDescriptor_setPropSignedGradient_bool(self.as_raw_mut_HOGDescriptor(), val) }.into_result().expect("Infallible function failed: set_signed_gradient")
}
fn get_descriptor_size(&self) -> Result<size_t> {
unsafe { sys::cv_HOGDescriptor_getDescriptorSize_const(self.as_raw_HOGDescriptor()) }.into_result()
}
fn check_detector_size(&self) -> Result<bool> {
unsafe { sys::cv_HOGDescriptor_checkDetectorSize_const(self.as_raw_HOGDescriptor()) }.into_result()
}
fn get_win_sigma(&self) -> Result<f64> {
unsafe { sys::cv_HOGDescriptor_getWinSigma_const(self.as_raw_HOGDescriptor()) }.into_result()
}
fn set_svm_detector(&mut self, _svmdetector: &dyn core::ToInputArray) -> Result<()> {
input_array_arg!(_svmdetector);
unsafe { sys::cv_HOGDescriptor_setSVMDetector_const__InputArrayR(self.as_raw_mut_HOGDescriptor(), _svmdetector.as_raw__InputArray()) }.into_result()
}
fn read(&mut self, fn_: &mut core::FileNode) -> Result<bool> {
unsafe { sys::cv_HOGDescriptor_read_FileNodeR(self.as_raw_mut_HOGDescriptor(), fn_.as_raw_mut_FileNode()) }.into_result()
}
fn write(&self, fs: &mut core::FileStorage, objname: &str) -> Result<()> {
extern_container_arg!(objname);
unsafe { sys::cv_HOGDescriptor_write_const_FileStorageR_const_StringR(self.as_raw_HOGDescriptor(), fs.as_raw_mut_FileStorage(), objname.opencv_as_extern()) }.into_result()
}
/// ## C++ default parameters
/// * objname: String()
fn load(&mut self, filename: &str, objname: &str) -> Result<bool> {
extern_container_arg!(filename);
extern_container_arg!(objname);
unsafe { sys::cv_HOGDescriptor_load_const_StringR_const_StringR(self.as_raw_mut_HOGDescriptor(), filename.opencv_as_extern(), objname.opencv_as_extern()) }.into_result()
}
/// ## C++ default parameters
/// * objname: String()
fn save(&self, filename: &str, objname: &str) -> Result<()> {
extern_container_arg!(filename);
extern_container_arg!(objname);
unsafe { sys::cv_HOGDescriptor_save_const_const_StringR_const_StringR(self.as_raw_HOGDescriptor(), filename.opencv_as_extern(), objname.opencv_as_extern()) }.into_result()
}
fn copy_to(&self, c: &mut crate::objdetect::HOGDescriptor) -> Result<()> {
unsafe { sys::cv_HOGDescriptor_copyTo_const_HOGDescriptorR(self.as_raw_HOGDescriptor(), c.as_raw_mut_HOGDescriptor()) }.into_result()
}
/// ## C++ default parameters
/// * win_stride: Size()
/// * padding: Size()
/// * locations: std::vector<Point>()
fn compute(&self, img: &dyn core::ToInputArray, descriptors: &mut core::Vector::<f32>, win_stride: core::Size, padding: core::Size, locations: &core::Vector::<core::Point>) -> Result<()> {
input_array_arg!(img);
unsafe { sys::cv_HOGDescriptor_compute_const_const__InputArrayR_vector_float_R_Size_Size_const_vector_Point_R(self.as_raw_HOGDescriptor(), img.as_raw__InputArray(), descriptors.as_raw_mut_VectorOff32(), win_stride.opencv_as_extern(), padding.opencv_as_extern(), locations.as_raw_VectorOfPoint()) }.into_result()
}
/// with found weights output
///
/// ## C++ default parameters
/// * hit_threshold: 0
/// * win_stride: Size()
/// * padding: Size()
/// * search_locations: std::vector<Point>()
fn detect_weights(&self, img: &core::Mat, found_locations: &mut core::Vector::<core::Point>, weights: &mut core::Vector::<f64>, hit_threshold: f64, win_stride: core::Size, padding: core::Size, search_locations: &core::Vector::<core::Point>) -> Result<()> {
unsafe { sys::cv_HOGDescriptor_detect_const_const_MatR_vector_Point_R_vector_double_R_double_Size_Size_const_vector_Point_R(self.as_raw_HOGDescriptor(), img.as_raw_Mat(), found_locations.as_raw_mut_VectorOfPoint(), weights.as_raw_mut_VectorOff64(), hit_threshold, win_stride.opencv_as_extern(), padding.opencv_as_extern(), search_locations.as_raw_VectorOfPoint()) }.into_result()
}
/// without found weights output
///
/// ## C++ default parameters
/// * hit_threshold: 0
/// * win_stride: Size()
/// * padding: Size()
/// * search_locations: std::vector<Point>()
fn detect(&self, img: &core::Mat, found_locations: &mut core::Vector::<core::Point>, hit_threshold: f64, win_stride: core::Size, padding: core::Size, search_locations: &core::Vector::<core::Point>) -> Result<()> {
unsafe { sys::cv_HOGDescriptor_detect_const_const_MatR_vector_Point_R_double_Size_Size_const_vector_Point_R(self.as_raw_HOGDescriptor(), img.as_raw_Mat(), found_locations.as_raw_mut_VectorOfPoint(), hit_threshold, win_stride.opencv_as_extern(), padding.opencv_as_extern(), search_locations.as_raw_VectorOfPoint()) }.into_result()
}
/// with result weights output
///
/// ## C++ default parameters
/// * hit_threshold: 0
/// * win_stride: Size()
/// * padding: Size()
/// * scale: 1.05
/// * final_threshold: 2.0
/// * use_meanshift_grouping: false
fn detect_multi_scale_weights(&self, img: &dyn core::ToInputArray, found_locations: &mut core::Vector::<core::Rect>, found_weights: &mut core::Vector::<f64>, hit_threshold: f64, win_stride: core::Size, padding: core::Size, scale: f64, final_threshold: f64, use_meanshift_grouping: bool) -> Result<()> {
input_array_arg!(img);
unsafe { sys::cv_HOGDescriptor_detectMultiScale_const_const__InputArrayR_vector_Rect_R_vector_double_R_double_Size_Size_double_double_bool(self.as_raw_HOGDescriptor(), img.as_raw__InputArray(), found_locations.as_raw_mut_VectorOfRect(), found_weights.as_raw_mut_VectorOff64(), hit_threshold, win_stride.opencv_as_extern(), padding.opencv_as_extern(), scale, final_threshold, use_meanshift_grouping) }.into_result()
}
/// without found weights output
///
/// ## C++ default parameters
/// * hit_threshold: 0
/// * win_stride: Size()
/// * padding: Size()
/// * scale: 1.05
/// * final_threshold: 2.0
/// * use_meanshift_grouping: false
fn detect_multi_scale(&self, img: &dyn core::ToInputArray, found_locations: &mut core::Vector::<core::Rect>, hit_threshold: f64, win_stride: core::Size, padding: core::Size, scale: f64, final_threshold: f64, use_meanshift_grouping: bool) -> Result<()> {
input_array_arg!(img);
unsafe { sys::cv_HOGDescriptor_detectMultiScale_const_const__InputArrayR_vector_Rect_R_double_Size_Size_double_double_bool(self.as_raw_HOGDescriptor(), img.as_raw__InputArray(), found_locations.as_raw_mut_VectorOfRect(), hit_threshold, win_stride.opencv_as_extern(), padding.opencv_as_extern(), scale, final_threshold, use_meanshift_grouping) }.into_result()
}
/// ## C++ default parameters
/// * padding_tl: Size()
/// * padding_br: Size()
fn compute_gradient(&self, img: &core::Mat, grad: &mut core::Mat, angle_ofs: &mut core::Mat, padding_tl: core::Size, padding_br: core::Size) -> Result<()> {
unsafe { sys::cv_HOGDescriptor_computeGradient_const_const_MatR_MatR_MatR_Size_Size(self.as_raw_HOGDescriptor(), img.as_raw_Mat(), grad.as_raw_mut_Mat(), angle_ofs.as_raw_mut_Mat(), padding_tl.opencv_as_extern(), padding_br.opencv_as_extern()) }.into_result()
}
/// evaluate specified ROI and return confidence value for each location
///
/// ## C++ default parameters
/// * hit_threshold: 0
/// * win_stride: Size()
/// * padding: Size()
fn detect_roi(&self, img: &core::Mat, locations: &core::Vector::<core::Point>, found_locations: &mut core::Vector::<core::Point>, confidences: &mut core::Vector::<f64>, hit_threshold: f64, win_stride: core::Size, padding: core::Size) -> Result<()> {
unsafe { sys::cv_HOGDescriptor_detectROI_const_const_MatR_const_vector_Point_R_vector_Point_R_vector_double_R_double_Size_Size(self.as_raw_HOGDescriptor(), img.as_raw_Mat(), locations.as_raw_VectorOfPoint(), found_locations.as_raw_mut_VectorOfPoint(), confidences.as_raw_mut_VectorOff64(), hit_threshold, win_stride.opencv_as_extern(), padding.opencv_as_extern()) }.into_result()
}
/// evaluate specified ROI and return confidence value for each location in multiple scales
///
/// ## C++ default parameters
/// * hit_threshold: 0
/// * group_threshold: 0
fn detect_multi_scale_roi(&self, img: &core::Mat, found_locations: &mut core::Vector::<core::Rect>, locations: &mut core::Vector::<crate::objdetect::DetectionROI>, hit_threshold: f64, group_threshold: i32) -> Result<()> {
unsafe { sys::cv_HOGDescriptor_detectMultiScaleROI_const_const_MatR_vector_Rect_R_vector_DetectionROI_R_double_int(self.as_raw_HOGDescriptor(), img.as_raw_Mat(), found_locations.as_raw_mut_VectorOfRect(), locations.as_raw_mut_VectorOfDetectionROI(), hit_threshold, group_threshold) }.into_result()
}
/// read/parse Dalal's alt model file
fn read_alt_model(&mut self, modelfile: &str) -> Result<()> {
extern_container_arg!(mut modelfile);
unsafe { sys::cv_HOGDescriptor_readALTModel_String(self.as_raw_mut_HOGDescriptor(), modelfile.opencv_as_extern_mut()) }.into_result()
}
fn group_rectangles(&self, rect_list: &mut core::Vector::<core::Rect>, weights: &mut core::Vector::<f64>, group_threshold: i32, eps: f64) -> Result<()> {
unsafe { sys::cv_HOGDescriptor_groupRectangles_const_vector_Rect_R_vector_double_R_int_double(self.as_raw_HOGDescriptor(), rect_list.as_raw_mut_VectorOfRect(), weights.as_raw_mut_VectorOff64(), group_threshold, eps) }.into_result()
}
}
pub struct HOGDescriptor {
ptr: *mut c_void
}
opencv_type_boxed! { HOGDescriptor }
impl Drop for HOGDescriptor {
fn drop(&mut self) {
extern "C" { fn cv_HOGDescriptor_delete(instance: *mut c_void); }
unsafe { cv_HOGDescriptor_delete(self.as_raw_mut_HOGDescriptor()) };
}
}
impl HOGDescriptor {
#[inline] pub fn as_raw_HOGDescriptor(&self) -> *const c_void { self.as_raw() }
#[inline] pub fn as_raw_mut_HOGDescriptor(&mut self) -> *mut c_void { self.as_raw_mut() }
}
unsafe impl Send for HOGDescriptor {}
impl crate::objdetect::HOGDescriptorTrait for HOGDescriptor {
#[inline] fn as_raw_HOGDescriptor(&self) -> *const c_void { self.as_raw() }
#[inline] fn as_raw_mut_HOGDescriptor(&mut self) -> *mut c_void { self.as_raw_mut() }
}
impl HOGDescriptor {
pub fn default() -> Result<crate::objdetect::HOGDescriptor> {
unsafe { sys::cv_HOGDescriptor_HOGDescriptor() }.into_result().map(|r| unsafe { crate::objdetect::HOGDescriptor::opencv_from_extern(r) } )
}
/// ## C++ default parameters
/// * _deriv_aperture: 1
/// * _win_sigma: -1
/// * _histogram_norm_type: HOGDescriptor::L2Hys
/// * _l2_hys_threshold: 0.2
/// * _gamma_correction: false
/// * _nlevels: HOGDescriptor::DEFAULT_NLEVELS
/// * _signed_gradient: false
pub fn new(_win_size: core::Size, _block_size: core::Size, _block_stride: core::Size, _cell_size: core::Size, _nbins: i32, _deriv_aperture: i32, _win_sigma: f64, _histogram_norm_type: i32, _l2_hys_threshold: f64, _gamma_correction: bool, _nlevels: i32, _signed_gradient: bool) -> Result<crate::objdetect::HOGDescriptor> {
unsafe { sys::cv_HOGDescriptor_HOGDescriptor_Size_Size_Size_Size_int_int_double_int_double_bool_int_bool(_win_size.opencv_as_extern(), _block_size.opencv_as_extern(), _block_stride.opencv_as_extern(), _cell_size.opencv_as_extern(), _nbins, _deriv_aperture, _win_sigma, _histogram_norm_type, _l2_hys_threshold, _gamma_correction, _nlevels, _signed_gradient) }.into_result().map(|r| unsafe { crate::objdetect::HOGDescriptor::opencv_from_extern(r) } )
}
pub fn new_from_file(filename: &str) -> Result<crate::objdetect::HOGDescriptor> {
extern_container_arg!(filename);
unsafe { sys::cv_HOGDescriptor_HOGDescriptor_const_StringR(filename.opencv_as_extern()) }.into_result().map(|r| unsafe { crate::objdetect::HOGDescriptor::opencv_from_extern(r) } )
}
pub fn copy(d: &crate::objdetect::HOGDescriptor) -> Result<crate::objdetect::HOGDescriptor> {
unsafe { sys::cv_HOGDescriptor_HOGDescriptor_const_HOGDescriptorR(d.as_raw_HOGDescriptor()) }.into_result().map(|r| unsafe { crate::objdetect::HOGDescriptor::opencv_from_extern(r) } )
}
pub fn get_default_people_detector() -> Result<core::Vector::<f32>> {
unsafe { sys::cv_HOGDescriptor_getDefaultPeopleDetector() }.into_result().map(|r| unsafe { core::Vector::<f32>::opencv_from_extern(r) } )
}
pub fn get_daimler_people_detector() -> Result<core::Vector::<f32>> {
unsafe { sys::cv_HOGDescriptor_getDaimlerPeopleDetector() }.into_result().map(|r| unsafe { core::Vector::<f32>::opencv_from_extern(r) } )
}
}
/// class for grouping object candidates, detected by Cascade Classifier, HOG etc.
/// instance of the class is to be passed to cv::partition (see cxoperations.hpp)
pub trait SimilarRectsTrait {
fn as_raw_SimilarRects(&self) -> *const c_void;
fn as_raw_mut_SimilarRects(&mut self) -> *mut c_void;
fn eps(&self) -> f64 {
unsafe { sys::cv_SimilarRects_getPropEps_const(self.as_raw_SimilarRects()) }.into_result().expect("Infallible function failed: eps")
}
fn set_eps(&mut self, val: f64) -> () {
unsafe { sys::cv_SimilarRects_setPropEps_double(self.as_raw_mut_SimilarRects(), val) }.into_result().expect("Infallible function failed: set_eps")
}
}
/// class for grouping object candidates, detected by Cascade Classifier, HOG etc.
/// instance of the class is to be passed to cv::partition (see cxoperations.hpp)
pub struct SimilarRects {
ptr: *mut c_void
}
opencv_type_boxed! { SimilarRects }
impl Drop for SimilarRects {
fn drop(&mut self) {
extern "C" { fn cv_SimilarRects_delete(instance: *mut c_void); }
unsafe { cv_SimilarRects_delete(self.as_raw_mut_SimilarRects()) };
}
}
impl SimilarRects {
#[inline] pub fn as_raw_SimilarRects(&self) -> *const c_void { self.as_raw() }
#[inline] pub fn as_raw_mut_SimilarRects(&mut self) -> *mut c_void { self.as_raw_mut() }
}
unsafe impl Send for SimilarRects {}
impl crate::objdetect::SimilarRectsTrait for SimilarRects {
#[inline] fn as_raw_SimilarRects(&self) -> *const c_void { self.as_raw() }
#[inline] fn as_raw_mut_SimilarRects(&mut self) -> *mut c_void { self.as_raw_mut() }
}
impl SimilarRects {
pub fn new(_eps: f64) -> Result<crate::objdetect::SimilarRects> {
unsafe { sys::cv_SimilarRects_SimilarRects_double(_eps) }.into_result().map(|r| unsafe { crate::objdetect::SimilarRects::opencv_from_extern(r) } )
}
}
| 55.677281 | 465 | 0.762261 |
7aa52f3606056b8d1a4943f8c853982aa837cc6a | 43,572 | use std::fmt;
use std::marker::PhantomData;
use std::ops::{Add, Div, Mul, Sub};
use std::pin::Pin;
use afarray::Array;
use async_trait::async_trait;
use destream::{de, en};
use futures::future::{self, TryFutureExt};
use futures::stream::{self, Stream, StreamExt, TryStreamExt};
use log::debug;
use safecast::{AsType, CastFrom, CastInto};
use tc_btree::{BTreeType, Node};
use tc_error::*;
use tc_transact::fs::{CopyFrom, Dir, File, Persist, Restore};
use tc_transact::{IntoView, Transact, Transaction, TxnId};
use tc_value::{
Float, FloatType, Number, NumberClass, NumberInstance, NumberType, Trigonometry, UIntType,
};
use tcgeneric::{Instance, TCBoxTryFuture};
use super::dense::{BlockListSparse, DenseTensor, PER_BLOCK};
use super::stream::ReadValueAt;
use super::transform;
use super::{
coord_bounds, tile, trig_dtype, AxisBounds, Bounds, Coord, Phantom, Schema, Shape, Tensor,
TensorAccess, TensorBoolean, TensorBooleanConst, TensorCompare, TensorCompareConst,
TensorDiagonal, TensorDualIO, TensorIO, TensorIndex, TensorInstance, TensorMath,
TensorMathConst, TensorPersist, TensorReduce, TensorTransform, TensorTrig, TensorType,
TensorUnary, ERR_COMPLEX_EXPONENT,
};
use access::*;
pub use access::{DenseToSparse, SparseAccess, SparseAccessor, SparseWrite};
use combine::coord_to_offset;
pub use table::SparseTable;
mod access;
mod combine;
mod table;
pub type SparseRow = (Coord, Number);
pub type SparseStream<'a> = Pin<Box<dyn Stream<Item = TCResult<SparseRow>> + Send + Unpin + 'a>>;
const ERR_NOT_SPARSE: &str = "The result of the requested operation would not be sparse; \
convert to a DenseTensor first.";
/// A `Tensor` stored as a `Table` of [`Coord`]s and [`Number`] values
#[derive(Clone)]
pub struct SparseTensor<FD, FS, D, T, A> {
accessor: A,
phantom: Phantom<FD, FS, D, T>,
}
impl<FD, FS, D, T, A> SparseTensor<FD, FS, D, T, A> {
/// Consume this [`SparseTensor`] and return its accessor.
pub fn into_inner(self) -> A {
self.accessor
}
}
impl<FD, FS, D, T, A> Instance for SparseTensor<FD, FS, D, T, A>
where
Self: Send + Sync,
{
type Class = TensorType;
fn class(&self) -> Self::Class {
TensorType::Sparse
}
}
type Condensed<FD, FS, D, T, L, R> =
DenseTensor<FD, FS, D, T, BlockListSparse<FD, FS, D, T, SparseCombinator<FD, FS, D, T, L, R>>>;
impl<FD, FS, D, T, A> SparseTensor<FD, FS, D, T, A>
where
FD: File<Array>,
FS: File<Node>,
D: Dir,
T: Transaction<D>,
A: SparseAccess<FD, FS, D, T>,
{
fn combine<R: SparseAccess<FD, FS, D, T>>(
self,
other: SparseTensor<FD, FS, D, T, R>,
combinator: fn(Number, Number) -> Number,
) -> TCResult<SparseTensor<FD, FS, D, T, SparseCombinator<FD, FS, D, T, A, R>>> {
if self.shape() != other.shape() {
return Err(TCError::unsupported(format!(
"cannot compare Tensors of different shapes: {}, {}",
self.shape(),
other.shape()
)));
}
let accessor = SparseCombinator::new(self.accessor, other.accessor, combinator)?;
Ok(SparseTensor {
accessor,
phantom: self.phantom,
})
}
fn condense<R>(
self,
other: SparseTensor<FD, FS, D, T, R>,
condensor: fn(Number, Number) -> Number,
) -> TCResult<Condensed<FD, FS, D, T, A, R>>
where
R: SparseAccess<FD, FS, D, T>,
{
if self.shape() != other.shape() {
return Err(TCError::unsupported(format!(
"cannot condense sparse Tensor of size {} with another of size {}",
self.shape(),
other.shape()
)));
}
let accessor = SparseCombinator::new(self.accessor, other.accessor, condensor)?;
let dense = BlockListSparse::from(accessor);
Ok(dense.into())
}
fn left_combine<R>(
self,
other: SparseTensor<FD, FS, D, T, R>,
combinator: fn(Number, Number) -> Number,
) -> TCResult<SparseTensor<FD, FS, D, T, SparseLeftCombinator<FD, FS, D, T, A, R>>>
where
R: SparseAccess<FD, FS, D, T>,
{
if self.shape() != other.shape() {
return Err(TCError::unsupported(format!(
"cannot compare Tensors of different shapes: {}, {}",
self.shape(),
other.shape()
)));
}
let accessor = SparseLeftCombinator::new(self.accessor, other.accessor, combinator)?;
Ok(SparseTensor {
accessor,
phantom: self.phantom,
})
}
}
impl<FD, FS, D, T> SparseTensor<FD, FS, D, T, SparseTable<FD, FS, D, T>>
where
D: Dir,
T: Transaction<D>,
FD: File<Array>,
FS: File<Node>,
D::File: AsType<FD> + AsType<FS>,
D::FileClass: From<BTreeType> + From<TensorType>,
{
/// Create a new `SparseTensor` with the given schema
pub async fn create(dir: &D, schema: Schema, txn_id: TxnId) -> TCResult<Self> {
SparseTable::create(dir, schema, txn_id)
.map_ok(Self::from)
.await
}
/// Tile the given `tensor` into a new `SparseTensor`
pub async fn tile(
txn: T,
tensor: SparseTensor<FD, FS, D, T, SparseAccessor<FD, FS, D, T>>,
multiples: Vec<u64>,
) -> TCResult<Self> {
if multiples.len() != tensor.ndim() {
return Err(TCError::bad_request(
"wrong number of multiples to tile a Tensor with shape",
tensor.shape(),
))?;
}
let txn_id = *txn.id();
let dir = txn.context().create_dir_unique(txn_id).await?;
let dtype = tensor.dtype();
let shape = tensor
.shape()
.iter()
.zip(&multiples)
.map(|(dim, m)| dim * m)
.collect();
let input = match tensor.accessor {
SparseAccessor::Table(table) => table.into(),
other => {
let dir = txn.context().create_dir_unique(*txn.id()).await?;
SparseTensor::copy_from(other.into(), dir, &txn).await?
}
};
let output = Self::create(&dir, Schema { shape, dtype }, txn_id).await?;
tile(txn, input, output, multiples).await
}
}
impl<FD, FS, D, T> TensorPersist for SparseTensor<FD, FS, D, T, SparseAccessor<FD, FS, D, T>> {
type Persistent = SparseTensor<FD, FS, D, T, SparseTable<FD, FS, D, T>>;
fn as_persistent(self) -> Option<Self::Persistent> {
match self.accessor {
SparseAccessor::Table(table) => Some(table.into()),
_ => None,
}
}
}
impl<FD, FS, D, T, A> TensorAccess for SparseTensor<FD, FS, D, T, A>
where
FD: File<Array>,
FS: File<Node>,
D: Dir,
T: Transaction<D>,
A: SparseAccess<FD, FS, D, T>,
{
fn dtype(&self) -> NumberType {
self.accessor.dtype()
}
fn ndim(&self) -> usize {
self.accessor.ndim()
}
fn shape(&self) -> &Shape {
self.accessor.shape()
}
fn size(&self) -> u64 {
self.accessor.size()
}
}
impl<FD, FS, D, T, A> TensorInstance for SparseTensor<FD, FS, D, T, A> {
type Dense = DenseTensor<FD, FS, D, T, BlockListSparse<FD, FS, D, T, A>>;
type Sparse = Self;
fn into_dense(self) -> Self::Dense {
BlockListSparse::from(self.into_inner()).into()
}
fn into_sparse(self) -> Self::Sparse {
self
}
}
// Element-wise boolean operations between two sparse tensors.
// `and` uses the left combinator (presumably because a zero on the left
// forces a zero result, so only the left operand's filled entries need to
// be visited — see `SparseLeftCombinator`); `or`/`xor` visit both operands.
impl<FD, FS, D, T, L, R> TensorBoolean<SparseTensor<FD, FS, D, T, R>>
    for SparseTensor<FD, FS, D, T, L>
where
    FD: File<Array>,
    FS: File<Node>,
    D: Dir,
    T: Transaction<D>,
    L: SparseAccess<FD, FS, D, T>,
    R: SparseAccess<FD, FS, D, T>,
{
    type Combine = SparseTensor<FD, FS, D, T, SparseCombinator<FD, FS, D, T, L, R>>;
    type LeftCombine = SparseTensor<FD, FS, D, T, SparseLeftCombinator<FD, FS, D, T, L, R>>;
    fn and(self, other: SparseTensor<FD, FS, D, T, R>) -> TCResult<Self::LeftCombine> {
        self.left_combine(other, Number::and)
    }
    fn or(self, other: SparseTensor<FD, FS, D, T, R>) -> TCResult<Self::Combine> {
        self.combine(other, Number::or)
    }
    fn xor(self, other: SparseTensor<FD, FS, D, T, R>) -> TCResult<Self::Combine> {
        self.combine(other, Number::xor)
    }
}
// Boolean operations against a generic `Tensor` operand: `and` converts the
// dense operand to sparse, while `or`/`xor` convert `self` to dense first.
impl<FD, FS, D, T, A> TensorBoolean<Tensor<FD, FS, D, T>> for SparseTensor<FD, FS, D, T, A>
where
    D: Dir,
    T: Transaction<D>,
    FD: File<Array>,
    FS: File<Node>,
    D::File: AsType<FD> + AsType<FS>,
    D::FileClass: From<TensorType>,
    A: SparseAccess<FD, FS, D, T>,
{
    type Combine = Tensor<FD, FS, D, T>;
    type LeftCombine = Tensor<FD, FS, D, T>;
    fn and(self, other: Tensor<FD, FS, D, T>) -> TCResult<Self::Combine> {
        match other {
            Tensor::Dense(other) => self.and(other.into_sparse()).map(Tensor::from),
            Tensor::Sparse(other) => self.and(other).map(Tensor::from),
        }
    }
    fn or(self, other: Tensor<FD, FS, D, T>) -> TCResult<Self::Combine> {
        match other {
            Tensor::Dense(other) => self.into_dense().or(other).map(Tensor::from),
            Tensor::Sparse(other) => self.or(other).map(Tensor::from),
        }
    }
    fn xor(self, other: Tensor<FD, FS, D, T>) -> TCResult<Self::Combine> {
        match other {
            Tensor::Dense(other) => self.into_dense().xor(other).map(Tensor::from),
            Tensor::Sparse(other) => self.xor(other).map(Tensor::from),
        }
    }
}
// Boolean operations against a scalar constant, applied lazily element-wise
// through a const combinator over this tensor's accessor.
impl<FD, FS, D, T, A> TensorBooleanConst for SparseTensor<FD, FS, D, T, A>
where
    FD: File<Array>,
    FS: File<Node>,
    D: Dir,
    T: Transaction<D>,
    A: SparseAccess<FD, FS, D, T>,
{
    type Combine = SparseTensor<FD, FS, D, T, SparseConstCombinator<FD, FS, D, T, A>>;
    fn and_const(self, other: Number) -> TCResult<Self::Combine> {
        Ok(SparseConstCombinator::new(self.accessor, other, Number::and).into())
    }
    fn or_const(self, other: Number) -> TCResult<Self::Combine> {
        Ok(SparseConstCombinator::new(self.accessor, other, Number::or).into())
    }
    fn xor_const(self, other: Number) -> TCResult<Self::Combine> {
        Ok(SparseConstCombinator::new(self.accessor, other, Number::xor).into())
    }
}
impl<FD, FS, D, T, L, R> TensorCompare<SparseTensor<FD, FS, D, T, R>>
for SparseTensor<FD, FS, D, T, L>
where
D: Dir,
T: Transaction<D>,
FD: File<Array>,
FS: File<Node>,
D::File: AsType<FD> + AsType<FS>,
D::FileClass: From<TensorType>,
L: SparseAccess<FD, FS, D, T>,
R: SparseAccess<FD, FS, D, T>,
{
type Compare = SparseTensor<FD, FS, D, T, SparseCombinator<FD, FS, D, T, L, R>>;
type Dense = Condensed<FD, FS, D, T, L, R>;
fn eq(self, other: SparseTensor<FD, FS, D, T, R>) -> TCResult<Self::Dense> {
fn eq(l: Number, r: Number) -> Number {
(l == r).into()
}
self.condense(other, eq)
}
fn gt(self, other: SparseTensor<FD, FS, D, T, R>) -> TCResult<Self::Compare> {
fn gt(l: Number, r: Number) -> Number {
(l > r).into()
}
self.combine(other, gt)
}
fn gte(self, other: SparseTensor<FD, FS, D, T, R>) -> TCResult<Self::Dense> {
fn gte(l: Number, r: Number) -> Number {
(l >= r).into()
}
self.condense(other, gte)
}
fn lt(self, other: SparseTensor<FD, FS, D, T, R>) -> TCResult<Self::Compare> {
fn lt(l: Number, r: Number) -> Number {
(l < r).into()
}
self.combine(other, lt)
}
fn lte(self, other: SparseTensor<FD, FS, D, T, R>) -> TCResult<Self::Dense> {
fn lte(l: Number, r: Number) -> Number {
(l <= r).into()
}
self.condense(other, lte)
}
fn ne(self, other: SparseTensor<FD, FS, D, T, R>) -> TCResult<Self::Compare> {
fn ne(l: Number, r: Number) -> Number {
(l != r).into()
}
self.combine(other, ne)
}
}
// Element-wise comparisons against a generic `Tensor` operand: sparse-result
// operators keep the sparse path, dense-result operators densify `self` first.
impl<FD, FS, D, T, A> TensorCompare<Tensor<FD, FS, D, T>> for SparseTensor<FD, FS, D, T, A>
where
    D: Dir,
    T: Transaction<D>,
    FD: File<Array>,
    FS: File<Node>,
    D::File: AsType<FD> + AsType<FS>,
    D::FileClass: From<TensorType>,
    A: SparseAccess<FD, FS, D, T>,
{
    type Compare = Tensor<FD, FS, D, T>;
    type Dense = Tensor<FD, FS, D, T>;
    fn eq(self, other: Tensor<FD, FS, D, T>) -> TCResult<Self::Dense> {
        match other {
            Tensor::Dense(other) => self.eq(other.into_sparse()).map(Tensor::from),
            Tensor::Sparse(other) => self.eq(other).map(Tensor::from),
        }
    }
    fn gt(self, other: Tensor<FD, FS, D, T>) -> TCResult<Self::Compare> {
        match other {
            Tensor::Dense(other) => self.into_dense().gt(other).map(Tensor::from),
            Tensor::Sparse(other) => self.gt(other).map(Tensor::from),
        }
    }
    fn gte(self, other: Tensor<FD, FS, D, T>) -> TCResult<Self::Dense> {
        match other {
            Tensor::Dense(other) => self.into_dense().gte(other).map(Tensor::from),
            Tensor::Sparse(other) => self.gte(other).map(Tensor::from),
        }
    }
    fn lt(self, other: Tensor<FD, FS, D, T>) -> TCResult<Self::Compare> {
        // BUG FIX: both arms previously dispatched to `gt`, so `a < b` was
        // evaluated as `a > b`. Dispatch to `lt` like every other operator here.
        match other {
            Tensor::Dense(other) => self.into_dense().lt(other).map(Tensor::from),
            Tensor::Sparse(other) => self.lt(other).map(Tensor::from),
        }
    }
    fn lte(self, other: Tensor<FD, FS, D, T>) -> TCResult<Self::Dense> {
        match other {
            Tensor::Dense(other) => self.into_dense().lte(other).map(Tensor::from),
            Tensor::Sparse(other) => self.lte(other).map(Tensor::from),
        }
    }
    fn ne(self, other: Tensor<FD, FS, D, T>) -> TCResult<Self::Compare> {
        match other {
            Tensor::Dense(other) => self.ne(other.into_sparse()).map(Tensor::from),
            Tensor::Sparse(other) => self.ne(other).map(Tensor::from),
        }
    }
}
impl<FD, FS, D, T, A> TensorCompareConst for SparseTensor<FD, FS, D, T, A> {
type Compare = SparseTensor<FD, FS, D, T, SparseConstCombinator<FD, FS, D, T, A>>;
fn eq_const(self, other: Number) -> TCResult<Self::Compare> {
fn eq(l: Number, r: Number) -> Number {
(l == r).into()
}
Ok(SparseConstCombinator::new(self.accessor, other, eq).into())
}
fn gt_const(self, other: Number) -> TCResult<Self::Compare> {
fn gt(l: Number, r: Number) -> Number {
(l > r).into()
}
Ok(SparseConstCombinator::new(self.accessor, other, gt).into())
}
fn gte_const(self, other: Number) -> TCResult<Self::Compare> {
fn gte(l: Number, r: Number) -> Number {
(l >= r).into()
}
Ok(SparseConstCombinator::new(self.accessor, other, gte).into())
}
fn lt_const(self, other: Number) -> TCResult<Self::Compare> {
fn lt(l: Number, r: Number) -> Number {
(l < r).into()
}
Ok(SparseConstCombinator::new(self.accessor, other, lt).into())
}
fn lte_const(self, other: Number) -> TCResult<Self::Compare> {
fn lte(l: Number, r: Number) -> Number {
(l <= r).into()
}
Ok(SparseConstCombinator::new(self.accessor, other, lte).into())
}
fn ne_const(self, other: Number) -> TCResult<Self::Compare> {
fn ne(l: Number, r: Number) -> Number {
(l != r).into()
}
Ok(SparseConstCombinator::new(self.accessor, other, ne).into())
}
}
#[async_trait]
impl<FD, FS, D, T, A> TensorDiagonal<D> for SparseTensor<FD, FS, D, T, A>
where
    D: Dir,
    T: Transaction<D>,
    FD: File<Array>,
    FS: File<Node>,
    A: SparseAccess<FD, FS, D, T>,
    D::File: AsType<FD> + AsType<FS>,
    D::FileClass: From<BTreeType> + From<TensorType>,
    SparseTable<FD, FS, D, T>: ReadValueAt<D, Txn = T>,
{
    type Txn = T;
    type Diagonal = SparseTensor<FD, FS, D, T, SparseTable<FD, FS, D, T>>;
    /// Extract the main diagonal of a square 2-D sparse tensor into a new
    /// 1-D table-backed sparse tensor.
    async fn diagonal(self, txn: Self::Txn) -> TCResult<Self::Diagonal> {
        // only implemented for matrices (exactly two dimensions)
        if self.ndim() != 2 {
            return Err(TCError::not_implemented(format!(
                "diagonal of a {}-dimensional sparse Tensor",
                self.ndim()
            )));
        }
        let size = self.shape()[0];
        if size != self.shape()[1] {
            return Err(TCError::bad_request(
                "diagonal requires a square matrix but found",
                self.shape(),
            ));
        }
        let txn_id = *txn.id();
        // the result lives in a fresh transaction-scoped directory
        let dir = txn.context().create_dir_unique(txn_id).await?;
        let shape = vec![size].into();
        let dtype = self.dtype();
        let schema = Schema { shape, dtype };
        let table = SparseTable::create(&dir, schema, txn_id).await?;
        let filled = self.accessor.filled(txn).await?;
        filled
            .try_filter_map(|(mut coord, value)| {
                future::ready(Ok({
                    debug_assert!(coord.len() == 2);
                    debug_assert_ne!(value, value.class().zero());
                    // pop the column index and keep only entries whose column
                    // equals their row, i.e. entries on the main diagonal;
                    // the remaining one-element coord is the output coord
                    if coord.pop() == Some(coord[0]) {
                        Some((coord, value))
                    } else {
                        None
                    }
                }))
            })
            // write the surviving entries into the output table concurrently
            .map_ok(|(coord, value)| table.write_value(txn_id, coord, value))
            .try_buffer_unordered(num_cpus::get())
            .try_fold((), |(), ()| future::ready(Ok(())))
            .await?;
        Ok(table.into())
    }
}
#[async_trait]
impl<FD, FS, D, T, L> TensorDualIO<D, SparseTensor<FD, FS, D, T, SparseTable<FD, FS, D, T>>>
    for SparseTensor<FD, FS, D, T, L>
where
    D: Dir,
    T: Transaction<D>,
    FD: File<Array>,
    FS: File<Node>,
    D::File: AsType<FD> + AsType<FS>,
    D::FileClass: From<TensorType>,
    L: SparseWrite<FD, FS, D, T>,
{
    type Txn = T;
    /// Write the filled entries of a table-backed sparse tensor into the
    /// slice of `self` selected by `bounds`.
    async fn write(
        self,
        txn: T,
        bounds: Bounds,
        other: SparseTensor<FD, FS, D, T, SparseTable<FD, FS, D, T>>,
    ) -> TCResult<()> {
        // the source tensor must exactly match the shape of the target slice
        let slice_shape = bounds.to_shape(self.shape())?;
        if &slice_shape != other.shape() {
            return Err(TCError::unsupported(format!(
                "cannot write tensor of shape {} to slice of shape {}",
                other.shape(),
                slice_shape,
            )));
        }
        let txn_id = *txn.id();
        let filled = other.accessor.filled(txn).await?;
        // rebase maps coords within the slice back to coords in `self`
        let rebase = transform::Slice::new(self.shape().clone(), bounds)?;
        filled
            .map_ok(move |(coord, value)| {
                let coord = rebase.invert_coord(&coord);
                (coord, value)
            })
            // issue the individual writes concurrently, bounded by CPU count
            .map_ok(|(coord, value)| self.accessor.write_value(txn_id, coord, value))
            .try_buffer_unordered(num_cpus::get())
            .try_fold((), |_, _| future::ready(Ok(())))
            .await
    }
}
#[async_trait]
impl<FD, FS, D, T, A> TensorDualIO<D, Tensor<FD, FS, D, T>> for SparseTensor<FD, FS, D, T, A>
where
    D: Dir,
    T: Transaction<D>,
    FD: File<Array>,
    FS: File<Node>,
    D::File: AsType<FD> + AsType<FS>,
    D::FileClass: From<BTreeType> + From<TensorType>,
    A: SparseWrite<FD, FS, D, T>,
{
    type Txn = T;
    /// Write any `Tensor` into the slice of `self` selected by `bounds`,
    /// first broadcasting it to the slice shape and materializing it as a
    /// table-backed sparse tensor if necessary.
    async fn write(
        self,
        txn: Self::Txn,
        bounds: Bounds,
        other: Tensor<FD, FS, D, T>,
    ) -> TCResult<()> {
        let shape = bounds.to_shape(self.shape())?;
        let other = if other.shape() == &shape {
            other
        } else {
            other.broadcast(shape)?
        };
        match other {
            Tensor::Dense(other) => {
                // materialize the dense source as a sparse table, then recurse
                let dir = txn.context().create_dir_unique(*txn.id()).await?;
                let other = SparseTensor::copy_from(other.into_sparse(), dir, &txn).await?;
                // NOTE(review): `other` is already sparse here, so this
                // `into_sparse()` looks like a no-op — confirm
                self.write(txn, bounds, other.into_sparse()).await
            }
            Tensor::Sparse(other) => match other.accessor {
                // already table-backed: write directly
                SparseAccessor::Table(table) => {
                    self.write(txn, bounds, SparseTensor::from(table)).await
                }
                // a sparse view: copy it into a table first
                other => {
                    let dir = txn.context().create_dir_unique(*txn.id()).await?;
                    let other = SparseTensor::copy_from(other.into(), dir, &txn).await?;
                    self.write(txn, bounds, other).await
                }
            },
        }
    }
}
#[async_trait]
impl<FD, FS, D, T, A> TensorIndex<D> for SparseTensor<FD, FS, D, T, A>
where
    FD: File<Array>,
    FS: File<Node>,
    D: Dir,
    T: Transaction<D>,
    A: SparseWrite<FD, FS, D, T>,
    D::File: AsType<FD> + AsType<FS>,
    D::FileClass: From<BTreeType> + From<TensorType>,
{
    type Txn = T;
    type Index = SparseTensor<FD, FS, D, T, SparseTable<FD, FS, D, T>>;
    /// Compute the index of the maximum value along `axis`, producing a
    /// tensor of u64 indices with that axis removed.
    async fn argmax(self, txn: Self::Txn, axis: usize) -> TCResult<Self::Index> {
        if axis >= self.ndim() {
            return Err(TCError::unsupported(format!(
                "invalid argmax axis for tensor with {} dimensions: {}",
                self.ndim(),
                axis
            )));
        }
        // the output shape is the input shape with `axis` removed
        let shape = {
            let mut shape = self.shape().clone();
            shape.remove(axis);
            shape
        };
        let schema = Schema {
            shape,
            dtype: NumberType::UInt(UIntType::U64),
        };
        let txn_id = *txn.id();
        let dir = txn.context().create_dir_unique(txn_id).await?;
        let table = SparseTable::create(&dir, schema, txn_id).await?;
        let dim = self.shape()[axis];
        let zero = self.dtype().zero();
        // iterate over the coordinates of every axis except `axis`
        let axes = (0..self.ndim())
            .into_iter()
            .filter(|x| x != &axis)
            .collect();
        let mut filled = self.accessor.clone().filled_at(txn.clone(), axes).await?;
        while let Some(coords) = filled.try_next().await? {
            for coord in coords.to_vec() {
                // select the 1-D slice along `axis` at this coordinate
                let mut bounds: Bounds = coord.iter().cloned().map(AxisBounds::At).collect();
                bounds.insert(axis, AxisBounds::all(dim));
                let slice = self.accessor.clone().slice(bounds)?;
                debug_assert_eq!(slice.ndim(), 1);
                let filled = slice.filled(txn.clone()).await?;
                // a 1-D coord has exactly one element: the offset along `axis`
                let filled = filled.map_ok(|(offset, value)| (offset[0], value));
                let imax = imax(filled, zero, dim).await?;
                table.write_value(txn_id, coord, imax.0.into()).await?;
            }
        }
        Ok(table.into())
    }
    /// Compute the index of the maximum value over the flattened tensor.
    async fn argmax_all(self, txn: Self::Txn) -> TCResult<u64> {
        let zero = self.dtype().zero();
        let size = self.size();
        // flatten each coordinate into a single linear offset
        let coord_bounds = coord_bounds(self.shape());
        let filled = self.accessor.filled(txn).await?;
        let filled =
            filled.map_ok(move |(coord, value)| (coord_to_offset(&coord, &coord_bounds), value));
        let imax = imax(filled, zero, size).await?;
        Ok(imax.0)
    }
}
/// Find the (offset, value) of the maximum element in a sparse 1-D stream of
/// `size` logical elements, of which only the nonzero ones appear in `filled`
/// (in ascending offset order).
///
/// Absent offsets are implicit zeros: `first_empty` tracks the offset of the
/// first such gap so that, when no stored value exceeds `zero`, the argmax is
/// the first implicit zero element instead of a stored entry.
async fn imax<F>(mut filled: F, zero: Number, size: u64) -> TCResult<(u64, Number)>
where
    F: Stream<Item = TCResult<(u64, Number)>> + Unpin,
{
    let mut first_empty = Some(0);
    let mut last = 0u64;
    let mut imax = None;
    while let Some((offset, value)) = filled.try_next().await? {
        if offset == 0 {
            // offset 0 is filled, so there is no gap at the start
            first_empty = None;
        } else if first_empty.is_none() {
            // detect the first gap between consecutive filled offsets
            if offset > (last + 1) {
                first_empty = Some(last + 1)
            }
        }
        // track the running maximum among the stored (nonzero) values
        if let Some((ref mut i, ref mut max)) = &mut imax {
            if value > *max {
                *i = offset;
                *max = value;
            }
        } else {
            imax = Some((offset, value));
        }
        last = offset;
    }
    // if no interior gap was found, check for a gap after the last filled offset
    if first_empty.is_none() && last < (size - 1) {
        if last == 0 && imax.is_none() {
            first_empty = Some(0);
        } else {
            first_empty = Some(last + 1);
        }
    }
    if let Some((i, max)) = imax {
        if max > zero {
            Ok((i, max))
        } else if let Some(first_empty) = first_empty {
            // every stored value is <= zero, so the first implicit zero wins
            Ok((first_empty, zero))
        } else {
            Ok((i, max))
        }
    } else {
        // empty stream: every element is implicitly zero
        Ok((0, zero))
    }
}
#[async_trait]
impl<FD, FS, D, T, A> TensorIO<D> for SparseTensor<FD, FS, D, T, A>
where
    FD: File<Array>,
    FS: File<Node>,
    D: Dir,
    T: Transaction<D>,
    A: SparseWrite<FD, FS, D, T>,
{
    type Txn = T;
    /// Read the value at a single coordinate.
    async fn read_value(self, txn: Self::Txn, coord: Coord) -> TCResult<Number> {
        self.accessor
            .read_value_at(txn, coord)
            .map_ok(|(_, value)| value)
            .await
    }
    /// Write `value` to every coordinate within `bounds`.
    async fn write_value(&self, txn_id: TxnId, mut bounds: Bounds, value: Number) -> TCResult<()> {
        // a zero-dimensional tensor has a single value with an empty coord
        if self.shape().is_empty() {
            return self.accessor.write_value(txn_id, vec![], value).await;
        }
        bounds.normalize(self.shape());
        debug!("SparseTensor::write_value {} to bounds, {}", value, bounds);
        // write each affected coordinate concurrently, bounded by CPU count
        stream::iter(bounds.affected())
            .map(|coord| self.accessor.write_value(txn_id, coord, value))
            .buffer_unordered(num_cpus::get())
            .try_fold((), |_, _| future::ready(Ok(())))
            .await
    }
    /// Write `value` to a single coordinate.
    async fn write_value_at(&self, txn_id: TxnId, coord: Coord, value: Number) -> TCResult<()> {
        self.accessor.write_value(txn_id, coord, value).await
    }
}
// Element-wise arithmetic between two sparse tensors. Operations whose result
// is zero wherever the left operand is zero (`div`, `log`, `mul`, `pow`) use
// the left combinator; `add` and `sub` must visit both operands' entries.
impl<FD, FS, D, T, L, R> TensorMath<D, SparseTensor<FD, FS, D, T, R>>
    for SparseTensor<FD, FS, D, T, L>
where
    FD: File<Array>,
    FS: File<Node>,
    D: Dir,
    T: Transaction<D>,
    L: SparseAccess<FD, FS, D, T>,
    R: SparseAccess<FD, FS, D, T>,
{
    type Combine = SparseTensor<FD, FS, D, T, SparseCombinator<FD, FS, D, T, L, R>>;
    type LeftCombine = SparseTensor<FD, FS, D, T, SparseLeftCombinator<FD, FS, D, T, L, R>>;
    fn add(self, other: SparseTensor<FD, FS, D, T, R>) -> TCResult<Self::Combine> {
        debug!("SparseTensor::add");
        self.combine(other, Number::add)
    }
    fn div(self, other: SparseTensor<FD, FS, D, T, R>) -> TCResult<Self::LeftCombine> {
        debug!("SparseTensor::div");
        fn div(l: Number, r: Number) -> Number {
            // to prevent a divide-by-zero error, treat the right-hand side as if it doesn't exist
            if r == r.class().zero() {
                Ord::max(l.class(), r.class()).zero()
            } else {
                l / r
            }
        }
        self.left_combine(other, div)
    }
    fn log(self, base: SparseTensor<FD, FS, D, T, R>) -> TCResult<Self::LeftCombine> {
        // logarithms with a complex base are not supported
        if base.dtype().is_complex() {
            return Err(TCError::unsupported(ERR_COMPLEX_EXPONENT));
        }
        fn log(n: Number, base: Number) -> Number {
            n.log(Float::cast_from(base))
        }
        self.left_combine(base, log)
    }
    fn mul(self, other: SparseTensor<FD, FS, D, T, R>) -> TCResult<Self::LeftCombine> {
        debug!("SparseTensor::mul");
        self.left_combine(other, Number::mul)
    }
    fn pow(self, other: SparseTensor<FD, FS, D, T, R>) -> TCResult<Self::LeftCombine> {
        // complex exponents are not supported
        if other.dtype().is_complex() {
            return Err(TCError::unsupported(ERR_COMPLEX_EXPONENT));
        }
        debug!("SparseTensor::pow");
        self.left_combine(other, Number::pow)
    }
    fn sub(self, other: SparseTensor<FD, FS, D, T, R>) -> TCResult<Self::Combine> {
        debug!("SparseTensor::sub");
        self.combine(other, Number::sub)
    }
}
impl<FD, FS, D, T, A> TensorMath<D, Tensor<FD, FS, D, T>> for SparseTensor<FD, FS, D, T, A>
where
D: Dir,
T: Transaction<D>,
FD: File<Array>,
FS: File<Node>,
D::File: AsType<FD> + AsType<FS>,
D::FileClass: From<TensorType>,
A: SparseAccess<FD, FS, D, T>,
{
type Combine = Tensor<FD, FS, D, T>;
type LeftCombine = Tensor<FD, FS, D, T>;
fn add(self, other: Tensor<FD, FS, D, T>) -> TCResult<Self::Combine> {
match other {
Tensor::Sparse(sparse) => self.add(sparse).map(Tensor::from),
Tensor::Dense(dense) => self.into_dense().add(dense).map(Tensor::from),
}
}
fn div(self, other: Tensor<FD, FS, D, T>) -> TCResult<Self::Combine> {
match other {
Tensor::Sparse(sparse) => self.div(sparse).map(Tensor::from),
Tensor::Dense(dense) => self.div(dense.into_sparse()).map(Tensor::from),
}
}
fn log(self, base: Tensor<FD, FS, D, T>) -> TCResult<Self::LeftCombine> {
match base {
Tensor::Sparse(sparse) => self.log(sparse).map(Tensor::from),
Tensor::Dense(dense) => self.log(dense.into_sparse()).map(Tensor::from),
}
}
fn mul(self, other: Tensor<FD, FS, D, T>) -> TCResult<Self::Combine> {
match other {
Tensor::Sparse(sparse) => self.mul(sparse).map(Tensor::from),
Tensor::Dense(dense) => self.mul(dense.into_sparse()).map(Tensor::from),
}
}
fn pow(self, other: Tensor<FD, FS, D, T>) -> TCResult<Self::Combine> {
match other {
Tensor::Sparse(sparse) => self.mul(sparse).map(Tensor::from),
Tensor::Dense(dense) => self.mul(dense.into_sparse()).map(Tensor::from),
}
}
fn sub(self, other: Tensor<FD, FS, D, T>) -> TCResult<Self::Combine> {
match other {
Tensor::Sparse(sparse) => self.sub(sparse).map(Tensor::from),
Tensor::Dense(dense) => self.into_dense().sub(dense).map(Tensor::from),
}
}
}
// Element-wise arithmetic against a scalar constant, applied lazily through a
// const combinator over this tensor's accessor.
impl<FD, FS, D, T, A> TensorMathConst for SparseTensor<FD, FS, D, T, A> {
    type Combine = SparseTensor<FD, FS, D, T, SparseConstCombinator<FD, FS, D, T, A>>;
    fn add_const(self, other: Number) -> TCResult<Self::Combine> {
        Ok(SparseConstCombinator::new(self.accessor, other, Number::add).into())
    }
    fn div_const(self, other: Number) -> TCResult<Self::Combine> {
        Ok(SparseConstCombinator::new(self.accessor, other, Number::div).into())
    }
    fn log_const(self, base: Number) -> TCResult<Self::Combine> {
        // logarithms with a complex base are not supported
        if base.class().is_complex() {
            return Err(TCError::unsupported(ERR_COMPLEX_EXPONENT));
        }
        fn log(n: Number, base: Number) -> Number {
            if let Number::Float(base) = base {
                n.log(base)
            } else {
                // `base` is cast to Float below, so this branch cannot be hit
                unreachable!("log with non-floating point base")
            }
        }
        // normalize the base to a float before constructing the combinator
        let base = Number::Float(base.cast_into());
        Ok(SparseConstCombinator::new(self.accessor, base, log).into())
    }
    fn mul_const(self, other: Number) -> TCResult<Self::Combine> {
        Ok(SparseConstCombinator::new(self.accessor, other, Number::mul).into())
    }
    fn pow_const(self, other: Number) -> TCResult<Self::Combine> {
        // only real exponents are supported
        if !other.class().is_real() {
            return Err(TCError::unsupported(ERR_COMPLEX_EXPONENT));
        }
        Ok(SparseConstCombinator::new(self.accessor, other, Number::pow).into())
    }
    fn sub_const(self, other: Number) -> TCResult<Self::Combine> {
        Ok(SparseConstCombinator::new(self.accessor, other, Number::sub).into())
    }
}
impl<FD, FS, D, T, A> TensorReduce<D> for SparseTensor<FD, FS, D, T, A>
where
D: Dir,
T: Transaction<D>,
FD: File<Array>,
FS: File<Node>,
D::File: AsType<FD> + AsType<FS>,
D::FileClass: From<TensorType>,
A: SparseAccess<FD, FS, D, T>,
Self: TensorInstance,
<Self as TensorInstance>::Dense: TensorReduce<D, Txn = T> + Send + Sync,
{
type Txn = T;
type Reduce = SparseTensor<FD, FS, D, T, SparseReduce<FD, FS, D, T>>;
fn product(self, axis: usize) -> TCResult<Self::Reduce> {
let accessor = SparseReduce::new(
self.accessor.accessor(),
axis,
SparseTensor::<FD, FS, D, T, SparseAccessor<FD, FS, D, T>>::product_all,
)?;
Ok(SparseTensor::from(accessor))
}
fn product_all(&self, txn: T) -> TCBoxTryFuture<Number> {
Box::pin(async move { self.clone().into_dense().product_all(txn).await })
}
fn sum(self, axis: usize) -> TCResult<Self::Reduce> {
let accessor = SparseReduce::new(
self.accessor.accessor(),
axis,
SparseTensor::<FD, FS, D, T, SparseAccessor<FD, FS, D, T>>::sum_all,
)?;
Ok(SparseTensor::from(accessor))
}
fn sum_all(&self, txn: T) -> TCBoxTryFuture<Number> {
Box::pin(async move {
let mut sum = self.dtype().zero();
let mut filled = self.accessor.clone().filled(txn).await?;
let mut buffer = Vec::with_capacity(PER_BLOCK);
while let Some((_coord, value)) = filled.try_next().await? {
buffer.push(value);
if buffer.len() == PER_BLOCK {
sum += Array::from(buffer.to_vec()).sum();
buffer.clear()
}
}
if !buffer.is_empty() {
sum += Array::from(buffer).sum();
}
Ok(sum)
})
}
}
// Lazy view transformations: each method wraps this tensor's accessor in a
// transform accessor without copying any data.
impl<FD, FS, D, T, A> TensorTransform for SparseTensor<FD, FS, D, T, A>
where
    D: Dir,
    T: Transaction<D>,
    FD: File<Array>,
    FS: File<Node>,
    D::File: AsType<FD> + AsType<FS>,
    D::FileClass: From<TensorType>,
    A: SparseAccess<FD, FS, D, T>,
{
    type Broadcast = SparseTensor<FD, FS, D, T, SparseBroadcast<FD, FS, D, T, A>>;
    type Cast = SparseTensor<FD, FS, D, T, SparseCast<FD, FS, D, T, A>>;
    type Expand = SparseTensor<FD, FS, D, T, SparseExpand<FD, FS, D, T, A>>;
    type Flip = SparseTensor<FD, FS, D, T, SparseFlip<FD, FS, D, T, A>>;
    type Reshape = SparseTensor<FD, FS, D, T, SparseReshape<FD, FS, D, T, A>>;
    type Slice = SparseTensor<FD, FS, D, T, A::Slice>;
    type Transpose = SparseTensor<FD, FS, D, T, A::Transpose>;
    /// Broadcast to a (compatible) larger shape.
    fn broadcast(self, shape: Shape) -> TCResult<Self::Broadcast> {
        let accessor = SparseBroadcast::new(self.accessor, shape)?;
        Ok(accessor.into())
    }
    /// View the elements as a different numeric type.
    fn cast_into(self, dtype: NumberType) -> TCResult<Self::Cast> {
        let accessor = SparseCast::new(self.accessor, dtype);
        Ok(accessor.into())
    }
    /// Insert a new dimension of extent 1 at `axis`.
    fn expand_dims(self, axis: usize) -> TCResult<Self::Expand> {
        let accessor = SparseExpand::new(self.accessor, axis)?;
        Ok(accessor.into())
    }
    /// Reverse the order of elements along `axis`.
    fn flip(self, axis: usize) -> TCResult<Self::Flip> {
        let accessor = SparseFlip::new(self.accessor, axis)?;
        Ok(accessor.into())
    }
    /// View the same elements with a different shape of equal size.
    fn reshape(self, shape: Shape) -> TCResult<Self::Reshape> {
        let accessor = SparseReshape::new(self.accessor, shape)?;
        Ok(accessor.into())
    }
    /// Restrict to the sub-tensor within `bounds`.
    fn slice(self, bounds: Bounds) -> TCResult<Self::Slice> {
        let accessor = self.accessor.slice(bounds)?;
        Ok(accessor.into())
    }
    /// Permute the dimensions (reversed order if `permutation` is `None`).
    fn transpose(self, permutation: Option<Vec<usize>>) -> TCResult<Self::Transpose> {
        let accessor = self.accessor.transpose(permutation)?;
        Ok(accessor.into())
    }
}
// Generates one trigonometric method which lazily applies `Number::$fun`
// element-wise through a `SparseUnary` accessor; the output dtype is chosen
// by `trig_dtype` (defined elsewhere in this module).
macro_rules! trig {
    ($fun:ident) => {
        fn $fun(&self) -> TCResult<Self::Unary> {
            let dtype = trig_dtype(self.dtype());
            let source = self.accessor.clone().accessor();
            let accessor = SparseUnary::new(source, Number::$fun, dtype);
            Ok(SparseTensor::from(accessor))
        }
    };
}
#[async_trait]
// All trigonometric methods are generated by the `trig!` macro above.
impl<FD, FS, D, T, A> TensorTrig for SparseTensor<FD, FS, D, T, A>
where
    FD: File<Array>,
    FS: File<Node>,
    D: Dir,
    T: Transaction<D>,
    A: SparseAccess<FD, FS, D, T>,
{
    type Unary = SparseTensor<FD, FS, D, T, SparseUnary<FD, FS, D, T>>;
    trig! {asin}
    trig! {sin}
    trig! {sinh}
    trig! {asinh}
    trig! {acos}
    trig! {cos}
    trig! {cosh}
    trig! {acosh}
    trig! {atan}
    trig! {tan}
    trig! {tanh}
    trig! {atanh}
}
#[async_trait]
// Element-wise unary operations and whole-tensor boolean reductions.
impl<FD, FS, D, T, A> TensorUnary<D> for SparseTensor<FD, FS, D, T, A>
where
    FD: File<Array>,
    FS: File<Node>,
    D: Dir,
    T: Transaction<D>,
    A: SparseAccess<FD, FS, D, T>,
{
    type Txn = T;
    type Unary = SparseTensor<FD, FS, D, T, SparseUnary<FD, FS, D, T>>;
    /// The element-wise absolute value.
    fn abs(&self) -> TCResult<Self::Unary> {
        let source = self.accessor.clone().accessor();
        let transform = <Number as NumberInstance>::abs;
        let accessor = SparseUnary::new(source, transform, self.dtype().one().abs().class())
;
        Ok(SparseTensor::from(accessor))
    }
    /// The element-wise exponential `e^x`, always computed as an `F64`.
    fn exp(&self) -> TCResult<Self::Unary> {
        fn exp(n: Number) -> Number {
            let n = f64::cast_from(n);
            n.exp().into()
        }
        let dtype = NumberType::Float(FloatType::F64);
        let source = self.accessor.clone().accessor();
        let accessor = SparseUnary::new(source, exp, dtype);
        Ok(SparseTensor::from(accessor))
    }
    /// The element-wise natural logarithm; the output dtype is whatever
    /// `ln` of this dtype's one produces.
    fn ln(&self) -> TCResult<Self::Unary> {
        let dtype = self.dtype().one().ln().class();
        let source = self.accessor.clone().accessor();
        let accessor = SparseUnary::new(source, Number::ln, dtype);
        Ok(SparseTensor::from(accessor))
    }
    /// The element-wise rounding to the nearest integer.
    fn round(&self) -> TCResult<Self::Unary> {
        // BUG FIX: this previously reused `ln`'s dtype computation
        // (`self.dtype().one().ln().class()`), which coerced the output to a
        // float type; rounding preserves this tensor's own dtype.
        let dtype = self.dtype();
        let source = self.accessor.clone().accessor();
        let accessor = SparseUnary::new(source, Number::round, dtype);
        Ok(SparseTensor::from(accessor))
    }
    /// Returns `true` iff every element is nonzero, i.e. iff the filled
    /// entries cover every coordinate in order.
    async fn all(self, txn: Self::Txn) -> TCResult<bool> {
        let size = self.size();
        let affected = stream::iter(Bounds::all(self.shape()).affected());
        let filled = self.accessor.filled(txn).await?;
        let mut coords = filled
            .map_ok(|(coord, _)| coord)
            .zip(affected)
            .map(|(r, expected)| r.map(|actual| (actual, expected)));
        let mut count = 0u64;
        while let Some((actual, expected)) = coords.try_next().await? {
            if actual != expected {
                return Ok(false);
            }
            count += 1;
        }
        // BUG FIX: `zip` ends when the (shorter) filled stream ends, so a
        // tensor whose trailing elements are implicit zeros used to report
        // `true`; require that every coordinate was actually filled.
        Ok(count == size)
    }
    /// Returns `true` iff at least one element is nonzero.
    async fn any(self, txn: Self::Txn) -> TCResult<bool> {
        let mut filled = self.accessor.filled(txn).await?;
        // BUG FIX: use `try_next` so a stream error propagates instead of
        // being counted as a filled entry.
        Ok(filled.try_next().await?.is_some())
    }
    /// Logical negation is not supported for sparse tensors (the result
    /// would be almost entirely nonzero).
    fn not(&self) -> TCResult<Self::Unary> {
        Err(TCError::unsupported(ERR_NOT_SPARSE))
    }
}
#[async_trait]
// Materialize any sparse tensor (including lazy views) into a new
// table-backed sparse tensor.
impl<FD, FS, D, T, A> CopyFrom<D, SparseTensor<FD, FS, D, T, A>>
    for SparseTensor<FD, FS, D, T, SparseTable<FD, FS, D, T>>
where
    D: Dir,
    T: Transaction<D>,
    FD: File<Array>,
    FS: File<Node>,
    D::File: AsType<FD> + AsType<FS>,
    D::FileClass: From<BTreeType> + From<TensorType>,
    A: SparseAccess<FD, FS, D, T>,
{
    async fn copy_from(
        instance: SparseTensor<FD, FS, D, T, A>,
        store: Self::Store,
        txn: &Self::Txn,
    ) -> TCResult<Self> {
        SparseTable::copy_from(instance, store, txn)
            .map_ok(Self::from)
            .await
    }
}
#[async_trait]
// Load a table-backed sparse tensor from persistent storage.
impl<FD, FS, D, T> Persist<D> for SparseTensor<FD, FS, D, T, SparseTable<FD, FS, D, T>>
where
    D: Dir,
    T: Transaction<D>,
    FD: File<Array>,
    FS: File<Node>,
    D::File: AsType<FD> + AsType<FS>,
    D::FileClass: From<BTreeType> + From<TensorType>,
{
    type Schema = Schema
    ;
    type Store = D;
    type Txn = T;
    /// The shape and dtype of this tensor.
    fn schema(&self) -> &Self::Schema {
        self.accessor.schema()
    }
    async fn load(txn: &Self::Txn, schema: Self::Schema, store: Self::Store) -> TCResult<Self> {
        SparseTable::load(txn, schema, store)
            .map_ok(Self::from)
            .await
    }
}
#[async_trait]
// Overwrite this tensor's contents with those of a backup copy.
impl<FD, FS, D, T> Restore<D> for SparseTensor<FD, FS, D, T, SparseTable<FD, FS, D, T>>
where
    D: Dir,
    T: Transaction<D>,
    FD: File<Array>,
    FS: File<Node>,
    D::File: AsType<FD> + AsType<FS>,
    D::FileClass: From<BTreeType> + From<TensorType>,
{
    async fn restore(&self, backup: &Self, txn_id: TxnId) -> TCResult<()> {
        self.accessor.restore(&backup.accessor, txn_id).await
    }
}
#[async_trait]
// Transaction lifecycle: both commit and finalize delegate to the
// underlying table.
impl<FD, FS, D, T> Transact for SparseTensor<FD, FS, D, T, SparseTable<FD, FS, D, T>>
where
    Self: Send + Sync,
    SparseTable<FD, FS, D, T>: Transact + Send + Sync,
{
    async fn commit(&self, txn_id: &TxnId) {
        self.accessor.commit(txn_id).await
    }
    async fn finalize(&self, txn_id: &TxnId) {
        self.accessor.finalize(txn_id).await
    }
}
// Wrap any sparse accessor in the `SparseTensor` facade.
impl<FD, FS, D, T, A> From<A> for SparseTensor<FD, FS, D, T, A> {
    fn from(accessor: A) -> Self {
        let phantom = Phantom::default();
        Self { accessor, phantom }
    }
}
// Human-readable description used in error messages and logs.
impl<FD, FS, D, T, A> fmt::Display for SparseTensor<FD, FS, D, T, A> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "a sparse Tensor")
    }
}
#[async_trait]
// Convert into an encodable view: the schema plus a stream of filled entries.
impl<'en, FD, FS, D, T, A> IntoView<'en, D> for SparseTensor<FD, FS, D, T, A>
where
    D: Dir,
    T: Transaction<D>,
    FD: File<Array>,
    FS: File<Node>,
    D::File: AsType<FD> + AsType<FS>,
    D::FileClass: From<TensorType>,
    A: SparseAccess<FD, FS, D, T>,
{
    type Txn = T;
    type View = SparseTensorView<'en>;
    async fn into_view(self, txn: Self::Txn) -> TCResult<Self::View> {
        // capture the metadata before `self.accessor` is moved below
        let shape = self.shape().clone();
        let dtype = self.dtype();
        Ok(SparseTensorView {
            schema: Schema { shape, dtype },
            filled: self.accessor.filled(txn).await?,
        })
    }
}
#[async_trait]
// Decode a sparse tensor from a serialized sequence (see `SparseTensorVisitor`).
impl<FD, FS, D, T> de::FromStream for SparseTensor<FD, FS, D, T, SparseTable<FD, FS, D, T>>
where
    D: Dir,
    T: Transaction<D>,
    FD: File<Array>,
    FS: File<Node>,
    D::File: AsType<FD> + AsType<FS>,
    D::FileClass: From<BTreeType> + From<TensorType>,
{
    type Context = T;
    async fn from_stream<De: de::Decoder>(txn: T, decoder: &mut De) -> Result<Self, De::Error> {
        decoder.decode_seq(SparseTensorVisitor::new(txn)).await
    }
}
// Visitor state for decoding a `SparseTensor` from a stream.
struct SparseTensorVisitor<FD, FS, D, T> {
    txn: T,
    // the PhantomData fields pin the file/dir type parameters, which do not
    // otherwise appear in any field
    dense: PhantomData<FD>,
    sparse: PhantomData<FS>,
    dir: PhantomData<D>,
}
impl<FD, FS, D, T> SparseTensorVisitor<FD, FS, D, T> {
    /// Construct a visitor that decodes into the given transaction's context.
    fn new(txn: T) -> Self {
        Self {
            txn,
            dense: PhantomData,
            sparse: PhantomData,
            dir: PhantomData,
        }
    }
}
#[async_trait]
impl<FD, FS, D, T> de::Visitor for SparseTensorVisitor<FD, FS, D, T>
where
D: Dir,
T: Transaction<D>,
FD: File<Array>,
FS: File<Node>,
D::File: AsType<FD> + AsType<FS>,
D::FileClass: From<BTreeType> + From<TensorType>,
{
type Value = SparseTensor<FD, FS, D, T, SparseTable<FD, FS, D, T>>;
fn expecting() -> &'static str {
"a SparseTensor"
}
async fn visit_seq<A: de::SeqAccess>(self, mut seq: A) -> Result<Self::Value, A::Error> {
let schema = seq.next_element::<Schema>(()).await?;
let schema = schema.ok_or_else(|| de::Error::invalid_length(0, "tensor schema"))?;
schema.validate("load Sparse").map_err(de::Error::custom)?;
let txn_id = *self.txn.id();
let table = SparseTable::create(self.txn.context(), schema, txn_id)
.map_err(de::Error::custom)
.await?;
if let Some(table) = seq
.next_element::<SparseTable<FD, FS, D, T>>((table.clone(), txn_id))
.await?
{
Ok(SparseTensor::from(table))
} else {
Ok(SparseTensor::from(table))
}
}
}
// Encodable view of a sparse tensor: its schema plus a stream of filled entries.
pub struct SparseTensorView<'en> {
    schema: Schema,
    filled: SparseStream<'en>,
}
// Encodes as a (schema, entries) pair, mirroring what the visitor decodes.
impl<'en> en::IntoStream<'en> for SparseTensorView<'en> {
    fn into_stream<E: en::Encoder<'en>>(self, encoder: E) -> Result<E::Ok, E::Error> {
        let Self { schema, filled } = self;
        (schema, en::SeqStream::from(filled)).into_stream(encoder)
    }
}
| 30.619817 | 99 | 0.552189 |
9bea98eb1601fa93aa0bd2da68dfc4a04804bc20 | 122,881 | //! OpenGL shading language backend
//!
//! The main structure is [`Writer`](Writer), it maintains internal state that is used
//! to output a [`Module`](crate::Module) into glsl
//!
//! # Supported versions
//! ### Core
//! - 330
//! - 400
//! - 410
//! - 420
//! - 430
//! - 450
//! - 460
//!
//! ### ES
//! - 300
//! - 310
//!
// GLSL is mostly a superset of C but it also removes some parts of it this is a list of relevant
// aspects for this backend.
//
// The most notable change is the introduction of the version preprocessor directive that must
// always be the first line of a glsl file and is written as
// `#version number profile`
// `number` is the version itself (i.e. 300) and `profile` is the
// shader profile we only support "core" and "es", the former is used in desktop applications and
// the later is used in embedded contexts, mobile devices and browsers. Each one as it's own
// versions (at the time of writing this the latest version for "core" is 460 and for "es" is 320)
//
// Other important preprocessor addition is the extension directive which is written as
// `#extension name: behaviour`
// Extensions provide increased features in a plugin fashion but they aren't required to be
// supported hence why they are called extensions, that's why `behaviour` is used it specifies
// wether the extension is strictly required or if it should only be enabled if needed. In our case
// when we use extensions we set behaviour to `require` always.
//
// The only thing that glsl removes that makes a difference are pointers.
//
// Additions that are relevant for the backend are the discard keyword, the introduction of
// vector, matrices, samplers, image types and functions that provide common shader operations
pub use features::Features;
use crate::{
back,
proc::{self, NameKey},
valid, Handle, ShaderStage, TypeInner,
};
use features::FeaturesManager;
use std::{
cmp::Ordering,
fmt,
fmt::{Error as FmtError, Write},
};
use thiserror::Error;
/// Contains the features related code and the features querying method
mod features;
/// Contains a constant with a slice of all the reserved keywords RESERVED_KEYWORDS
mod keywords;
/// List of supported core glsl versions
pub const SUPPORTED_CORE_VERSIONS: &[u16] = &[330, 400, 410, 420, 430, 440, 450];
/// List of supported es glsl versions
pub const SUPPORTED_ES_VERSIONS: &[u16] = &[300, 310, 320];
/// Maps each resource binding to the explicit `binding = ..` slot assigned in the GLSL output
pub type BindingMap = std::collections::BTreeMap<crate::ResourceBinding, u8>;
impl crate::AtomicFunction {
    /// Maps this atomic function to the suffix of the corresponding GLSL
    /// `atomic*` built-in (e.g. "Add" for `atomicAdd`).
    fn to_glsl(self) -> &'static str {
        match self {
            // Subtract shares the "Add" suffix — presumably the writer negates
            // the operand at the call site; confirm when emitting
            Self::Add | Self::Subtract => "Add",
            Self::And => "And",
            Self::InclusiveOr => "Or",
            Self::ExclusiveOr => "Xor",
            Self::Min => "Min",
            Self::Max => "Max",
            Self::Exchange { compare: None } => "Exchange",
            Self::Exchange { compare: Some(_) } => "", //TODO
        }
    }
}
impl crate::StorageClass {
    /// Whether globals in this storage class are backed by a buffer
    /// (uniform or storage) interface block.
    fn is_buffer(&self) -> bool {
        matches!(
            *self,
            crate::StorageClass::Uniform | crate::StorageClass::Storage { .. }
        )
    }
}
//Note: similar to `back/spv/helpers.rs`
/// Whether a global of the given type needs to be wrapped when emitted:
/// true for struct types, except those ending in a dynamically sized array.
fn global_needs_wrapper(ir_module: &crate::Module, global_ty: Handle<crate::Type>) -> bool {
    match ir_module.types[global_ty].inner {
        crate::TypeInner::Struct {
            ref members,
            span: _,
        // NOTE(review): `members.last().unwrap()` assumes a non-empty struct —
        // presumably guaranteed by validation; confirm
        } => match ir_module.types[members.last().unwrap().ty].inner {
            // Structs with dynamically sized arrays can't be copied and can't be wrapped.
            crate::TypeInner::Array {
                size: crate::ArraySize::Dynamic,
                ..
            } => false,
            _ => true,
        },
        _ => false,
    }
}
/// glsl version
///
/// The wrapped integer is the numeric version number (e.g. 450 or 310);
/// the variant records whether it belongs to the `core` or the `es` profile.
#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
pub enum Version {
    /// `core` glsl
    Desktop(u16),
    /// `es` glsl
    Embedded(u16),
}
impl Version {
    /// Returns true if self is `Version::Embedded` (i.e. is an es version)
    fn is_es(&self) -> bool {
        matches!(*self, Version::Embedded(_))
    }
    /// Checks the list of currently supported versions and returns true if it
    /// contains the specified version
    ///
    /// # Notes
    /// An invalid version number will never be added to the supported version
    /// lists, so this also checks for version validity
    fn is_supported(&self) -> bool {
        let (supported, version) = match *self {
            Version::Desktop(v) => (SUPPORTED_CORE_VERSIONS, v),
            Version::Embedded(v) => (SUPPORTED_ES_VERSIONS, v),
        };
        supported.contains(&version)
    }
    /// Checks if the version supports all of the explicit layouts:
    /// - `location=` qualifiers for bindings
    /// - `binding=` qualifiers for resources
    ///
    /// Note: `location=` for vertex inputs and fragment outputs is supported
    /// unconditionally for GLES 300.
    fn supports_explicit_locations(&self) -> bool {
        *self >= Version::Desktop(410) || *self >= Version::Embedded(310)
    }
    /// Returns true if `layout(early_fragment_tests)` may be emitted.
    fn supports_early_depth_test(&self) -> bool {
        *self >= Version::Embedded(310) || *self >= Version::Desktop(130)
    }
    /// Returns true if the `std430` buffer layout may be emitted.
    fn supports_std430_layout(&self) -> bool {
        *self >= Version::Embedded(310) || *self >= Version::Desktop(430)
    }
}
impl PartialOrd for Version {
    /// Versions are only ordered within the same profile; comparing a `core`
    /// version against an `es` version yields `None`.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        match (*self, *other) {
            (Version::Desktop(x), Version::Desktop(y))
            | (Version::Embedded(x), Version::Embedded(y)) => Some(x.cmp(&y)),
            _ => None,
        }
    }
}
impl fmt::Display for Version {
    /// Formats the version the way the `#version` directive expects it:
    /// the number followed by the profile name.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            Version::Embedded(v) => write!(f, "{} es", v),
            Version::Desktop(v) => write!(f, "{} core", v),
        }
    }
}
bitflags::bitflags! {
    /// Configuration flags controlling the behaviour of the [`Writer`](Writer).
    #[cfg_attr(feature = "serialize", derive(serde::Serialize))]
    #[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
    pub struct WriterFlags: u32 {
        /// Flip output Y and extend Z from (0,1) to (-1,1).
        const ADJUST_COORDINATE_SPACE = 0x1;
        /// Supports GL_EXT_texture_shadow_lod on the host, which provides
        /// additional functions on shadows and arrays of shadows.
        const TEXTURE_SHADOW_LOD = 0x2;
    }
}
/// Structure that contains the configuration used in the [`Writer`](Writer)
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
pub struct Options {
    /// The glsl version to be used
    pub version: Version,
    /// Configuration flags for the writer.
    pub writer_flags: WriterFlags,
    /// Map of resources association to binding locations.
    ///
    /// Consulted when the version supports explicit locations in order to
    /// emit `layout(binding = X)` qualifiers for globals and images.
    pub binding_map: BindingMap,
}
impl Default for Options {
    /// Default configuration: GLSL ES 3.10, coordinate-space adjustment
    /// enabled, and an empty binding map.
    fn default() -> Self {
        Self {
            version: Version::Embedded(310),
            writer_flags: WriterFlags::ADJUST_COORDINATE_SPACE,
            binding_map: BindingMap::default(),
        }
    }
}
/// A subset of options that are meant to be changed per pipeline.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
pub struct PipelineOptions {
    /// The stage of the entry point
    pub shader_stage: ShaderStage,
    /// The name of the entry point
    ///
    /// If no entry point that matches is found an error will be thrown while creating a new instance
    /// of [`Writer`](struct.Writer.html)
    pub entry_point: String,
}
/// Structure that contains a reflection info
pub struct ReflectionInfo {
    /// Maps a texture name to the texture (and optional sampler) globals it
    /// was generated from; see [`TextureMapping`](TextureMapping).
    pub texture_mapping: crate::FastHashMap<String, TextureMapping>,
    /// Maps global variable handles to the names they were written with.
    // NOTE(review): presumably filled from `reflection_names_globals` in
    // `collect_reflection_info` — confirm there.
    pub uniforms: crate::FastHashMap<Handle<crate::GlobalVariable>, String>,
}
/// Structure that connects a texture to a sampler or not
///
/// glsl pre vulkan has no concept of separate textures and samplers instead everything is a
/// `gsamplerN` where `g` is the scalar type and `N` is the dimension, but naga uses separate textures
/// and samplers in the IR so the backend produces a [`HashMap`](crate::FastHashMap) with the texture name
/// as a key and a [`TextureMapping`](TextureMapping) as a value this way the user knows where to bind.
///
/// [`Storage`](crate::ImageClass::Storage) images produce `gimageN` and don't have an associated sampler
/// so the [`sampler`](Self::sampler) field will be [`None`](std::option::Option::None)
#[derive(Debug, Clone)]
pub struct TextureMapping {
    /// Handle to the image global variable
    pub texture: Handle<crate::GlobalVariable>,
    /// Handle to the associated sampler global variable if it exists
    /// ([`None`](std::option::Option::None) for storage images, which have no sampler)
    pub sampler: Option<Handle<crate::GlobalVariable>>,
}
/// Helper structure that hands out sequential, unique `u32` ids
#[derive(Default)]
struct IdGenerator(u32);
impl IdGenerator {
    /// Generates a number that's guaranteed to be unique for this `IdGenerator`
    fn generate(&mut self) -> u32 {
        // Hand out the current counter value, then advance it so every call
        // yields the next integer, starting from 0.
        let id = self.0;
        self.0 = id + 1;
        id
    }
}
/// Helper wrapper used to get a name for a varying
///
/// Varyings have different naming schemes depending on their binding:
/// - Varyings with builtin bindings get the name from [`glsl_built_in`](glsl_built_in).
/// - Varyings with location bindings are named `_S_location_X` where `S` is a
///   prefix identifying which pipeline stage the varying connects, and `X` is
///   the location.
struct VaryingName<'a> {
    /// The binding (builtin or location) that decides the naming scheme.
    binding: &'a crate::Binding,
    /// The shader stage the varying belongs to (selects the prefix).
    stage: ShaderStage,
    /// True for stage outputs, false for stage inputs.
    output: bool,
}
impl fmt::Display for VaryingName<'_> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self.binding {
            crate::Binding::BuiltIn(built_in) => {
                // Builtins delegate to the lookup helper.
                write!(f, "{}", glsl_built_in(built_in, self.output))
            }
            crate::Binding::Location { location, .. } => {
                // The prefix names the pipeline interface this varying sits on:
                // pipeline->vertex, vertex->fragment, or fragment->pipeline.
                let prefix = match (self.stage, self.output) {
                    (ShaderStage::Vertex, false) => "p2vs",
                    (ShaderStage::Vertex, true) | (ShaderStage::Fragment, false) => "vs2fs",
                    (ShaderStage::Fragment, true) => "fs2p",
                    // Compute shaders have no user-defined varyings.
                    (ShaderStage::Compute, _) => unreachable!(),
                };
                write!(f, "_{}_location{}", prefix, location)
            }
        }
    }
}
/// Shorthand result used internally by the backend; defaults to `Ok(())`
/// style results for the write helpers.
type BackendResult<T = ()> = Result<T, Error>;
/// A glsl compilation error.
#[derive(Debug, Error)]
pub enum Error {
    /// An error occurred while writing to the output
    #[error("Format error")]
    FmtError(#[from] FmtError),
    /// The specified [`Version`](Version) doesn't have all required [`Features`](super)
    ///
    /// Contains the missing [`Features`](Features)
    #[error("The selected version doesn't support {0:?}")]
    MissingFeatures(Features),
    /// [`StorageClass::PushConstant`](crate::StorageClass::PushConstant) was used and isn't
    /// supported in the glsl backend
    #[error("Push constants aren't supported")]
    PushConstantNotSupported,
    /// The specified [`Version`](Version) isn't supported
    #[error("The specified version isn't supported")]
    VersionNotSupported,
    /// The entry point couldn't be found
    #[error("The requested entry point couldn't be found")]
    EntryPointNotFound,
    /// A call was made to an unsupported external
    #[error("A call was made to an unsupported external: {0}")]
    UnsupportedExternal(String),
    /// A scalar with an unsupported width was requested
    #[error("A scalar with an unsupported width was requested: {0:?} {1:?}")]
    UnsupportedScalar(crate::ScalarKind, crate::Bytes),
    /// An image was used with multiple samplers, which isn't supported
    #[error("A image was used with multiple samplers")]
    ImageMultipleSamplers,
    /// Catch-all variant for internal errors with a free-form message
    #[error("{0}")]
    Custom(String),
}
/// Binary operation with a different logic on the GLSL side
// NOTE(review): consumed by the binary-expression writer further down the
// file — confirm usage there.
enum BinaryOperation {
    /// Vector comparison should use the function like `greaterThan()`, etc.
    VectorCompare,
    /// GLSL `%` is SPIR-V `OpUMod/OpSMod` and `mod()` is `OpFMod`, but [`BinaryOperator::Modulo`](crate::BinaryOperator::Modulo) is `OpFRem`
    Modulo,
    /// Any plain operation. No additional logic required
    Other,
}
/// Main structure of the glsl backend responsible for all code generation
pub struct Writer<'a, W> {
    // Inputs
    /// The module being written
    module: &'a crate::Module,
    /// The module analysis.
    info: &'a valid::ModuleInfo,
    /// The output writer
    out: W,
    /// User defined configuration to be used
    options: &'a Options,
    // Internal State
    /// Features manager used to store all the needed features and write them
    features: FeaturesManager,
    /// Name generator producing collision-free identifiers (reserved keywords
    /// and the `gl_` prefix are excluded when it's reset in `new`)
    namer: proc::Namer,
    /// A map with all the names needed for writing the module
    /// (generated by a [`Namer`](crate::proc::Namer))
    names: crate::FastHashMap<NameKey, String>,
    /// A map with the names of global variables needed for reflections
    reflection_names_globals: crate::FastHashMap<Handle<crate::GlobalVariable>, String>,
    /// The selected entry point
    entry_point: &'a crate::EntryPoint,
    /// The index of the selected entry point
    entry_point_idx: proc::EntryPointIndex,
    /// Used to generate a unique number for blocks
    block_id: IdGenerator,
    /// Set of expressions that have associated temporary variables
    named_expressions: crate::NamedExpressions,
}
impl<'a, W: Write> Writer<'a, W> {
    /// Creates a new [`Writer`](Writer) instance
    ///
    /// # Errors
    /// - If the version specified isn't supported (or invalid)
    /// - If the entry point couldn't be found on the module
    /// - If the version specified doesn't support some used features
    pub fn new(
        out: W,
        module: &'a crate::Module,
        info: &'a valid::ModuleInfo,
        options: &'a Options,
        pipeline_options: &'a PipelineOptions,
    ) -> Result<Self, Error> {
        // Check if the requested version is supported
        if !options.version.is_supported() {
            log::error!("Version {}", options.version);
            return Err(Error::VersionNotSupported);
        }
        // Try to find the entry point and corresponding index
        // Both the stage and the name must match the pipeline options
        let ep_idx = module
            .entry_points
            .iter()
            .position(|ep| {
                pipeline_options.shader_stage == ep.stage && pipeline_options.entry_point == ep.name
            })
            .ok_or(Error::EntryPointNotFound)?;
        // Generate a map with names required to write the module
        // Reserved keywords and the `gl_` prefix are excluded so generated
        // names can't collide with glsl builtins
        let mut names = crate::FastHashMap::default();
        let mut namer = proc::Namer::default();
        namer.reset(module, keywords::RESERVED_KEYWORDS, &["gl_"], &mut names);
        // Build the instance
        let mut this = Self {
            module,
            info,
            out,
            options,
            namer,
            features: FeaturesManager::new(),
            names,
            reflection_names_globals: crate::FastHashMap::default(),
            entry_point: &module.entry_points[ep_idx],
            entry_point_idx: ep_idx as u16,
            block_id: IdGenerator::default(),
            named_expressions: crate::NamedExpressions::default(),
        };
        // Find all features required to print this module
        this.collect_required_features()?;
        Ok(this)
    }
    /// Writes the [`Module`](crate::Module) as glsl to the output
    ///
    /// # Notes
    /// If an error occurs while writing, the output might have been written partially
    ///
    /// # Panics
    /// Might panic if the module is invalid
    pub fn write(&mut self) -> Result<ReflectionInfo, Error> {
        // We use `writeln!(self.out)` throughout the write to add newlines
        // to make the output more readable
        let es = self.options.version.is_es();
        // Write the version (It must be the first thing or it isn't a valid glsl output)
        writeln!(self.out, "#version {}", self.options.version)?;
        // Write all the needed extensions
        //
        // This used to be the last thing being written as it allowed to search for features while
        // writing the module saving some loops but some older versions (420 or less) required the
        // extensions to appear before being used, even though extensions are part of the
        // preprocessor not the processor ¯\_(ツ)_/¯
        self.features.write(self.options.version, &mut self.out)?;
        // Write the additional extensions
        if self
            .options
            .writer_flags
            .contains(WriterFlags::TEXTURE_SHADOW_LOD)
        {
            // https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_texture_shadow_lod.txt
            writeln!(self.out, "#extension GL_EXT_texture_shadow_lod : require")?;
        }
        // glsl es requires a precision to be specified for floats and ints
        // TODO: Should this be user configurable?
        if es {
            writeln!(self.out)?;
            writeln!(self.out, "precision highp float;")?;
            writeln!(self.out, "precision highp int;")?;
            writeln!(self.out)?;
        }
        // Compute shaders need their workgroup size declared up front
        if self.entry_point.stage == ShaderStage::Compute {
            let workgroup_size = self.entry_point.workgroup_size;
            writeln!(
                self.out,
                "layout(local_size_x = {}, local_size_y = {}, local_size_z = {}) in;",
                workgroup_size[0], workgroup_size[1], workgroup_size[2]
            )?;
            writeln!(self.out)?;
        }
        // Enable early depth tests if needed
        if let Some(depth_test) = self.entry_point.early_depth_test {
            // If early depth test is supported for this version of GLSL
            if self.options.version.supports_early_depth_test() {
                writeln!(self.out, "layout(early_fragment_tests) in;")?;
                if let Some(conservative) = depth_test.conservative {
                    use crate::ConservativeDepth as Cd;
                    let depth = match conservative {
                        Cd::GreaterEqual => "greater",
                        Cd::LessEqual => "less",
                        Cd::Unchanged => "unchanged",
                    };
                    writeln!(self.out, "layout (depth_{}) out float gl_FragDepth;", depth)?;
                }
                writeln!(self.out)?;
            } else {
                log::warn!(
                    "Early depth testing is not supported for this version of GLSL: {}",
                    self.options.version
                );
            }
        }
        let ep_info = self.info.get_entry_point(self.entry_point_idx as usize);
        // Write all structs
        //
        // These are always ordered because the IR is structured in a way that you can't make a
        // struct without adding all of its members first
        for (handle, ty) in self.module.types.iter() {
            if let TypeInner::Struct { ref members, .. } = ty.inner {
                let used_by_global = self.module.global_variables.iter().any(|(vh, var)| {
                    !ep_info[vh].is_empty() && var.class.is_buffer() && var.ty == handle
                });
                let is_wrapped = global_needs_wrapper(self.module, handle);
                // If it's a global non-wrapped struct, it will be printed
                // with the corresponding global variable.
                if !used_by_global || is_wrapped {
                    let name = &self.names[&NameKey::Type(handle)];
                    write!(self.out, "struct {} ", name)?;
                    self.write_struct_body(handle, members)?;
                    writeln!(self.out, ";")?;
                }
            }
        }
        // Write the globals
        //
        // We filter all globals that aren't used by the selected entry point as they might
        // interfere with each other (i.e. two globals with the same location but with
        // different classes)
        for (handle, global) in self.module.global_variables.iter() {
            if ep_info[handle].is_empty() {
                continue;
            }
            match self.module.types[global.ty].inner {
                // We treat images separately because they might require
                // writing the storage format
                TypeInner::Image {
                    mut dim,
                    arrayed,
                    class,
                } => {
                    // Gather the storage format if needed
                    let storage_format_access = match self.module.types[global.ty].inner {
                        TypeInner::Image {
                            class: crate::ImageClass::Storage { format, access },
                            ..
                        } => Some((format, access)),
                        _ => None,
                    };
                    // GLSL ES has no 1D images, so promote them to 2D
                    if dim == crate::ImageDimension::D1 && es {
                        dim = crate::ImageDimension::D2
                    }
                    // Gather the location if needed
                    let layout_binding = if self.options.version.supports_explicit_locations() {
                        let br = global.binding.as_ref().unwrap();
                        self.options.binding_map.get(br).cloned()
                    } else {
                        None
                    };
                    // Write all the layout qualifiers
                    if layout_binding.is_some() || storage_format_access.is_some() {
                        write!(self.out, "layout(")?;
                        if let Some(binding) = layout_binding {
                            write!(self.out, "binding = {}", binding)?;
                        }
                        if let Some((format, _)) = storage_format_access {
                            let format_str = glsl_storage_format(format);
                            let separator = match layout_binding {
                                Some(_) => ",",
                                None => "",
                            };
                            write!(self.out, "{}{}", separator, format_str)?;
                        }
                        write!(self.out, ") ")?;
                    }
                    if let Some((_, access)) = storage_format_access {
                        self.write_storage_access(access)?;
                    }
                    // All images in glsl are `uniform`
                    // The trailing space is important
                    write!(self.out, "uniform ")?;
                    // write the type
                    //
                    // This is why we need the leading space because `write_image_type` doesn't add
                    // any spaces at the beginning or end
                    self.write_image_type(dim, arrayed, class)?;
                    // Finally write the name and end the global with a `;`
                    // The leading space is important
                    let global_name = self.get_global_name(handle, global);
                    writeln!(self.out, " {};", global_name)?;
                    writeln!(self.out)?;
                    self.reflection_names_globals.insert(handle, global_name);
                }
                // glsl has no concept of samplers so we just ignore it
                TypeInner::Sampler { .. } => continue,
                // All other globals are written by `write_global`
                _ => {
                    if !ep_info[handle].is_empty() {
                        self.write_global(handle, global)?;
                        // Add a newline (only for readability)
                        writeln!(self.out)?;
                    }
                }
            }
        }
        // Write the entry point interface: inputs first, then the output
        for arg in self.entry_point.function.arguments.iter() {
            self.write_varying(arg.binding.as_ref(), arg.ty, false)?;
        }
        if let Some(ref result) = self.entry_point.function.result {
            self.write_varying(result.binding.as_ref(), result.ty, true)?;
        }
        writeln!(self.out)?;
        // Write all regular functions
        for (handle, function) in self.module.functions.iter() {
            // Check that the function doesn't use globals that aren't supported
            // by the current entry point
            if !ep_info.dominates_global_use(&self.info[handle]) {
                continue;
            }
            let fun_info = &self.info[handle];
            // Write the function
            self.write_function(back::FunctionType::Function(handle), function, fun_info)?;
            writeln!(self.out)?;
        }
        // The entry point is written last, as glsl requires functions to be
        // declared before use
        self.write_function(
            back::FunctionType::EntryPoint(self.entry_point_idx),
            &self.entry_point.function,
            ep_info,
        )?;
        // Add newline at the end of file
        writeln!(self.out)?;
        // Collect all reflection info and return it to the user
        self.collect_reflection_info()
    }
fn write_array_size(&mut self, size: crate::ArraySize) -> BackendResult {
write!(self.out, "[")?;
// Write the array size
// Writes nothing if `ArraySize::Dynamic`
// Panics if `ArraySize::Constant` has a constant that isn't an sint or uint
match size {
crate::ArraySize::Constant(const_handle) => {
match self.module.constants[const_handle].inner {
crate::ConstantInner::Scalar {
width: _,
value: crate::ScalarValue::Uint(size),
} => write!(self.out, "{}", size)?,
crate::ConstantInner::Scalar {
width: _,
value: crate::ScalarValue::Sint(size),
} => write!(self.out, "{}", size)?,
_ => unreachable!(),
}
}
crate::ArraySize::Dynamic => (),
}
write!(self.out, "]")?;
Ok(())
}
    /// Helper method used to write value types
    ///
    /// # Notes
    /// Adds no trailing or leading whitespace
    ///
    /// # Panics
    /// - If type is either a image, a sampler, a pointer, or a struct
    /// - If it's an Array with a [`ArraySize::Constant`](crate::ArraySize::Constant) with a
    /// constant that isn't a [`Scalar`](crate::ConstantInner::Scalar) or if the
    /// scalar value isn't an [`Sint`](crate::ScalarValue::Sint) or [`Uint`](crate::ScalarValue::Uint)
    fn write_value_type(&mut self, inner: &TypeInner) -> BackendResult {
        match *inner {
            // Scalars are simple we just get the full name from `glsl_scalar`
            TypeInner::Scalar { kind, width }
            | TypeInner::Atomic { kind, width }
            | TypeInner::ValuePointer {
                size: None,
                kind,
                width,
                class: _,
            } => write!(self.out, "{}", glsl_scalar(kind, width)?.full)?,
            // Vectors are just `gvecN` where `g` is the scalar prefix and `N` is the vector size
            TypeInner::Vector { size, kind, width }
            | TypeInner::ValuePointer {
                size: Some(size),
                kind,
                width,
                class: _,
            } => write!(
                self.out,
                "{}vec{}",
                glsl_scalar(kind, width)?.prefix,
                size as u8
            )?,
            // Matrices are written with `gmatMxN` where `g` is the scalar prefix (only floats and
            // doubles are allowed), `M` is the columns count and `N` is the rows count
            //
            // glsl supports a matrix shorthand `gmatN` where `N` = `M` but it doesn't justify the
            // extra branch to write matrices this way
            TypeInner::Matrix {
                columns,
                rows,
                width,
            } => write!(
                self.out,
                "{}mat{}x{}",
                glsl_scalar(crate::ScalarKind::Float, width)?.prefix,
                columns as u8,
                rows as u8
            )?,
            // GLSL arrays are written as `type name[size]`
            // Current code writes arrays only as `[size]`
            // Base `type` and `name` should be written outside
            TypeInner::Array { size, .. } => self.write_array_size(size)?,
            // Error if either Image, Sampler, Pointer, or a Struct is being written
            //
            // Write all variants instead of `_` so that if new variants are added a
            // non-exhaustiveness error is thrown
            TypeInner::Pointer { .. }
            | TypeInner::Struct { .. }
            | TypeInner::Image { .. }
            | TypeInner::Sampler { .. } => {
                return Err(Error::Custom(format!("Unable to write type {:?}", inner)))
            }
        }
        Ok(())
    }
    /// Helper method used to write non image/sampler types
    ///
    /// # Notes
    /// Adds no trailing or leading whitespace
    ///
    /// # Panics
    /// - If type is either a image or sampler
    /// - If it's an Array with a [`ArraySize::Constant`](crate::ArraySize::Constant) with a
    /// constant that isn't a [`Scalar`](crate::ConstantInner::Scalar) or if the
    /// scalar value isn't an [`Sint`](crate::ScalarValue::Sint) or [`Uint`](crate::ScalarValue::Uint)
    fn write_type(&mut self, ty: Handle<crate::Type>) -> BackendResult {
        match self.module.types[ty].inner {
            // glsl has no pointer types so just write types as normal and loads are skipped
            TypeInner::Pointer { base, .. } => self.write_type(base),
            // glsl structs are written as just the struct name
            TypeInner::Struct { .. } => {
                // Get the struct name
                let name = &self.names[&NameKey::Type(ty)];
                write!(self.out, "{}", name)?;
                Ok(())
            }
            // glsl array has the size separated from the base type
            // (the caller is expected to emit the size via `write_array_size`)
            TypeInner::Array { base, .. } => self.write_type(base),
            // Everything else is a value type handled by `write_value_type`
            ref other => self.write_value_type(other),
        }
    }
    /// Helper method to write a image type
    ///
    /// # Notes
    /// Adds no leading or trailing whitespace
    fn write_image_type(
        &mut self,
        dim: crate::ImageDimension,
        arrayed: bool,
        class: crate::ImageClass,
    ) -> BackendResult {
        // glsl images consist of four parts the scalar prefix, the image "type", the dimensions
        // and modifiers
        //
        // There exist two image types
        // - sampler - for sampled images
        // - image - for storage images
        //
        // There are three possible modifiers that can be used together and must be written in
        // this order to be valid
        // - MS - used if it's a multisampled image
        // - Array - used if it's an image array
        // - Shadow - used if it's a depth image
        use crate::ImageClass as Ic;
        let (base, kind, ms, comparison) = match class {
            Ic::Sampled { kind, multi: true } => ("sampler", kind, "MS", ""),
            Ic::Sampled { kind, multi: false } => ("sampler", kind, "", ""),
            Ic::Depth { multi: true } => ("sampler", crate::ScalarKind::Float, "MS", ""),
            Ic::Depth { multi: false } => ("sampler", crate::ScalarKind::Float, "", "Shadow"),
            Ic::Storage { format, .. } => ("image", format.into(), "", ""),
        };
        // The `highp` precision qualifier covers the es case; width 4 picks
        // the 32-bit scalar prefix
        write!(
            self.out,
            "highp {}{}{}{}{}{}",
            glsl_scalar(kind, 4)?.prefix,
            base,
            glsl_dimension(dim),
            ms,
            if arrayed { "Array" } else { "" },
            comparison
        )?;
        Ok(())
    }
    /// Helper method used to write non images/sampler globals
    ///
    /// # Notes
    /// Adds a newline
    ///
    /// # Panics
    /// If the global has type sampler
    fn write_global(
        &mut self,
        handle: Handle<crate::GlobalVariable>,
        global: &crate::GlobalVariable,
    ) -> BackendResult {
        // Write the layout qualifiers (buffer layout and explicit binding)
        // when the version supports explicit locations
        if self.options.version.supports_explicit_locations() {
            if let Some(ref br) = global.binding {
                match self.options.binding_map.get(br) {
                    Some(binding) => {
                        // Storage buffers prefer std430 when available and
                        // fall back to std140; uniforms are always std140
                        let layout = match global.class {
                            crate::StorageClass::Storage { .. } => {
                                if self.options.version.supports_std430_layout() {
                                    "std430, "
                                } else {
                                    "std140, "
                                }
                            }
                            crate::StorageClass::Uniform => "std140, ",
                            _ => "",
                        };
                        write!(self.out, "layout({}binding = {}) ", layout, binding)?
                    }
                    None => {
                        log::debug!("unassigned binding for {:?}", global.name);
                        if let crate::StorageClass::Storage { .. } = global.class {
                            if self.options.version.supports_std430_layout() {
                                write!(self.out, "layout(std430) ")?
                            }
                        }
                    }
                }
            }
        }
        // Write the access qualifiers for storage buffers
        if let crate::StorageClass::Storage { access } = global.class {
            self.write_storage_access(access)?;
        }
        // Write the storage class
        // Trailing space is important
        if let Some(storage_class) = glsl_storage_class(global.class) {
            write!(self.out, "{} ", storage_class)?;
        }
        // If struct is a block we need to write `block_name { members }` where `block_name` must be
        // unique between blocks and structs so we add `_block_ID` where `ID` is a `IdGenerator`
        // generated number so it's unique and `members` are the same as in a struct
        // Write the block name, it's just the struct name appended with `_block_ID`
        let needs_wrapper = if global.class.is_buffer() {
            let ty_name = &self.names[&NameKey::Type(global.ty)];
            let block_name = format!(
                "{}_block_{}{:?}",
                ty_name,
                self.block_id.generate(),
                self.entry_point.stage,
            );
            write!(self.out, "{} ", block_name)?;
            self.reflection_names_globals.insert(handle, block_name);
            let needs_wrapper = global_needs_wrapper(self.module, global.ty);
            if needs_wrapper {
                write!(self.out, "{{ ")?;
                // Write the type
                // `write_type` adds no leading or trailing spaces
                self.write_type(global.ty)?;
            } else if let crate::TypeInner::Struct { ref members, .. } =
                self.module.types[global.ty].inner
            {
                self.write_struct_body(global.ty, members)?;
            }
            needs_wrapper
        } else {
            self.write_type(global.ty)?;
            false
        };
        // Finally write the global name and end the global with a `;` and a newline
        // Leading space is important
        write!(self.out, " ")?;
        self.write_global_name(handle, global)?;
        if let TypeInner::Array { size, .. } = self.module.types[global.ty].inner {
            self.write_array_size(size)?;
        }
        // Write the initializer: the explicit one if present, otherwise a
        // zero-initialization (only for types that support value init)
        if is_value_init_supported(self.module, global.ty) {
            write!(self.out, " = ")?;
            if let Some(init) = global.init {
                self.write_constant(init)?;
            } else {
                self.write_zero_init_value(global.ty)?;
            }
        }
        // Close the wrapper block opened above, if any
        if needs_wrapper {
            write!(self.out, "; }}")?;
        }
        writeln!(self.out, ";")?;
        Ok(())
    }
/// Helper method used to get a name for a global
///
/// Globals have different naming schemes depending on their binding:
/// - Globals without bindings use the name from the [`Namer`](crate::proc::Namer)
/// - Globals with resource binding are named `_group_X_binding_Y` where `X`
/// is the group and `Y` is the binding
fn get_global_name(
&self,
handle: Handle<crate::GlobalVariable>,
global: &crate::GlobalVariable,
) -> String {
match global.binding {
Some(ref br) => {
format!("_group_{}_binding_{}", br.group, br.binding)
}
None => self.names[&NameKey::GlobalVariable(handle)].clone(),
}
}
/// Helper method used to write a name for a global without additional heap allocation
fn write_global_name(
&mut self,
handle: Handle<crate::GlobalVariable>,
global: &crate::GlobalVariable,
) -> BackendResult {
match global.binding {
Some(ref br) => write!(self.out, "_group_{}_binding_{}", br.group, br.binding)?,
None => write!(
self.out,
"{}",
&self.names[&NameKey::GlobalVariable(handle)]
)?,
}
Ok(())
}
    /// Writes the varying declaration.
    ///
    /// Structs are flattened recursively: each member is written as its own
    /// varying using the member's binding.
    fn write_varying(
        &mut self,
        binding: Option<&crate::Binding>,
        ty: Handle<crate::Type>,
        output: bool,
    ) -> Result<(), Error> {
        match self.module.types[ty].inner {
            crate::TypeInner::Struct { ref members, .. } => {
                for member in members {
                    self.write_varying(member.binding.as_ref(), member.ty, output)?;
                }
            }
            _ => {
                // Builtin bindings (and missing bindings) produce no declaration
                let (location, interpolation, sampling) = match binding {
                    Some(&crate::Binding::Location {
                        location,
                        interpolation,
                        sampling,
                    }) => (location, interpolation, sampling),
                    _ => return Ok(()),
                };
                // Write the interpolation modifier if needed
                //
                // We ignore all interpolation and auxiliary modifiers that aren't used in fragment
                // shaders' input globals or vertex shaders' output globals.
                let emit_interpolation_and_auxiliary = match self.entry_point.stage {
                    ShaderStage::Vertex => output,
                    ShaderStage::Fragment => !output,
                    _ => false,
                };
                // Write the I/O locations, if allowed
                if self.options.version.supports_explicit_locations()
                    || !emit_interpolation_and_auxiliary
                {
                    write!(self.out, "layout(location = {}) ", location)?;
                }
                // Write the interpolation qualifier.
                if let Some(interp) = interpolation {
                    if emit_interpolation_and_auxiliary {
                        write!(self.out, "{} ", glsl_interpolation(interp))?;
                    }
                }
                // Write the sampling auxiliary qualifier.
                //
                // Before GLSL 4.2, the `centroid` and `sample` qualifiers were required to appear
                // immediately before the `in` / `out` qualifier, so we'll just follow that rule
                // here, regardless of the version.
                if let Some(sampling) = sampling {
                    if emit_interpolation_and_auxiliary {
                        if let Some(qualifier) = glsl_sampling(sampling) {
                            write!(self.out, "{} ", qualifier)?;
                        }
                    }
                }
                // Write the input/output qualifier.
                write!(self.out, "{} ", if output { "out" } else { "in" })?;
                // Write the type
                // `write_type` adds no leading or trailing spaces
                self.write_type(ty)?;
                // Finally write the global name and end the global with a `;` and a newline
                // Leading space is important
                let vname = VaryingName {
                    binding: &crate::Binding::Location {
                        location,
                        interpolation: None,
                        sampling: None,
                    },
                    stage: self.entry_point.stage,
                    output,
                };
                writeln!(self.out, " {};", vname)?;
            }
        }
        Ok(())
    }
/// Helper method used to write functions (both entry points and regular functions)
///
/// # Notes
/// Adds a newline
fn write_function(
&mut self,
ty: back::FunctionType,
func: &crate::Function,
info: &valid::FunctionInfo,
) -> BackendResult {
// Create a function context for the function being written
let ctx = back::FunctionCtx {
ty,
info,
expressions: &func.expressions,
named_expressions: &func.named_expressions,
};
self.named_expressions.clear();
// Write the function header
//
// glsl headers are the same as in c:
// `ret_type name(args)`
// `ret_type` is the return type
// `name` is the function name
// `args` is a comma separated list of `type name`
// | - `type` is the argument type
// | - `name` is the argument name
// Start by writing the return type if any otherwise write void
// This is the only place where `void` is a valid type
// (though it's more a keyword than a type)
if let back::FunctionType::EntryPoint(_) = ctx.ty {
write!(self.out, "void")?;
} else if let Some(ref result) = func.result {
self.write_type(result.ty)?;
} else {
write!(self.out, "void")?;
}
// Write the function name and open parentheses for the argument list
let function_name = match ctx.ty {
back::FunctionType::Function(handle) => &self.names[&NameKey::Function(handle)],
back::FunctionType::EntryPoint(_) => "main",
};
write!(self.out, " {}(", function_name)?;
// Write the comma separated argument list
//
// We need access to `Self` here so we use the reference passed to the closure as an
// argument instead of capturing as that would cause a borrow checker error
let arguments = match ctx.ty {
back::FunctionType::EntryPoint(_) => &[][..],
back::FunctionType::Function(_) => &func.arguments,
};
let arguments: Vec<_> = arguments
.iter()
.filter(|arg| match self.module.types[arg.ty].inner {
TypeInner::Sampler { .. } => false,
_ => true,
})
.collect();
self.write_slice(&arguments, |this, i, arg| {
// Write the argument type
match this.module.types[arg.ty].inner {
// We treat images separately because they might require
// writing the storage format
TypeInner::Image {
dim,
arrayed,
class,
} => {
// Write the storage format if needed
if let TypeInner::Image {
class: crate::ImageClass::Storage { format, .. },
..
} = this.module.types[arg.ty].inner
{
write!(this.out, "layout({}) ", glsl_storage_format(format))?;
}
// write the type
//
// This is way we need the leading space because `write_image_type` doesn't add
// any spaces at the beginning or end
this.write_image_type(dim, arrayed, class)?;
}
TypeInner::Pointer { base, .. } => {
// write parameter qualifiers
write!(this.out, "inout ")?;
this.write_type(base)?;
}
// All other types are written by `write_type`
_ => {
this.write_type(arg.ty)?;
}
}
// Write the argument name
// The leading space is important
write!(this.out, " {}", &this.names[&ctx.argument_key(i)])?;
Ok(())
})?;
// Close the parentheses and open braces to start the function body
writeln!(self.out, ") {{")?;
// Compose the function arguments from globals, in case of an entry point.
if let back::FunctionType::EntryPoint(ep_index) = ctx.ty {
let stage = self.module.entry_points[ep_index as usize].stage;
for (index, arg) in func.arguments.iter().enumerate() {
write!(self.out, "{}", back::INDENT)?;
self.write_type(arg.ty)?;
let name = &self.names[&NameKey::EntryPointArgument(ep_index, index as u32)];
write!(self.out, " {}", name)?;
write!(self.out, " = ")?;
match self.module.types[arg.ty].inner {
crate::TypeInner::Struct { ref members, .. } => {
self.write_type(arg.ty)?;
write!(self.out, "(")?;
for (index, member) in members.iter().enumerate() {
let varying_name = VaryingName {
binding: member.binding.as_ref().unwrap(),
stage,
output: false,
};
if index != 0 {
write!(self.out, ", ")?;
}
write!(self.out, "{}", varying_name)?;
}
writeln!(self.out, ");")?;
}
_ => {
let varying_name = VaryingName {
binding: arg.binding.as_ref().unwrap(),
stage,
output: false,
};
writeln!(self.out, "{};", varying_name)?;
}
}
}
}
// Write all function locals
// Locals are `type name (= init)?;` where the init part (including the =) are optional
//
// Always adds a newline
for (handle, local) in func.local_variables.iter() {
// Write indentation (only for readability) and the type
// `write_type` adds no trailing space
write!(self.out, "{}", back::INDENT)?;
self.write_type(local.ty)?;
// Write the local name
// The leading space is important
write!(self.out, " {}", self.names[&ctx.name_key(handle)])?;
// Write size for array type
if let TypeInner::Array { size, .. } = self.module.types[local.ty].inner {
self.write_array_size(size)?;
}
// Write the local initializer if needed
if let Some(init) = local.init {
// Put the equal signal only if there's a initializer
// The leading and trailing spaces aren't needed but help with readability
write!(self.out, " = ")?;
// Write the constant
// `write_constant` adds no trailing or leading space/newline
self.write_constant(init)?;
} else if is_value_init_supported(self.module, local.ty) {
write!(self.out, " = ")?;
self.write_zero_init_value(local.ty)?;
}
// Finish the local with `;` and add a newline (only for readability)
writeln!(self.out, ";")?
}
// Write the function body (statement list)
for sta in func.body.iter() {
// Write a statement, the indentation should always be 1 when writing the function body
// `write_stmt` adds a newline
self.write_stmt(sta, &ctx, back::Level(1))?;
}
// Close braces and add a newline
writeln!(self.out, "}}")?;
Ok(())
}
/// Helper method that writes a list of comma separated `T` with a writer function `F`
///
/// The writer function `F` receives a mutable reference to `self` so that no
/// borrow-checker conflicts arise (capturing `self` in a closure would), the
/// second argument is the 0 based index of the element in the list, and the
/// last argument is a reference to the element `T` being written
///
/// # Notes
/// - Adds no newlines or leading/trailing whitespace
/// - The last element won't have a trailing `,`
fn write_slice<T, F: FnMut(&mut Self, u32, &T) -> BackendResult>(
    &mut self,
    data: &[T],
    mut f: F,
) -> BackendResult {
    // Emit the separator *before* every element except the first; this
    // produces the exact same `a, b, c` output as suppressing a trailing
    // comma, without needing to compare against `data.len()`.
    for (index, element) in data.iter().enumerate() {
        if index != 0 {
            // The space after the comma is for readability only
            write!(self.out, ", ")?;
        }
        f(self, index as u32, element)?;
    }
    Ok(())
}
/// Helper method used to write constants
///
/// # Notes
/// Adds no newlines or leading/trailing whitespace
fn write_constant(&mut self, handle: Handle<crate::Constant>) -> BackendResult {
use crate::ScalarValue as Sv;
match self.module.constants[handle].inner {
crate::ConstantInner::Scalar {
width: _,
ref value,
} => match *value {
// Signed integers don't need anything special
Sv::Sint(int) => write!(self.out, "{}", int)?,
// Unsigned integers need a `u` at the end
//
// While `core` doesn't necessarily need it, it's allowed and since `es` needs it we
// always write it as the extra branch wouldn't have any benefit in readability
Sv::Uint(int) => write!(self.out, "{}u", int)?,
// Floats are written using `Debug` instead of `Display` because it always appends the
// decimal part even it's zero which is needed for a valid glsl float constant
Sv::Float(float) => write!(self.out, "{:?}", float)?,
// Booleans are either `true` or `false` so nothing special needs to be done
Sv::Bool(boolean) => write!(self.out, "{}", boolean)?,
},
// Composite constant are created using the same syntax as compose
// `type(components)` where `components` is a comma separated list of constants
crate::ConstantInner::Composite { ty, ref components } => {
self.write_type(ty)?;
write!(self.out, "(")?;
// Write the comma separated constants
self.write_slice(components, |this, _, arg| this.write_constant(*arg))?;
write!(self.out, ")")?
}
}
Ok(())
}
/// Helper method used to write struct bodies
///
/// GLSL structs follow the C layout: each member is `type name;`, and array
/// members carry their size after the name (`type name[size];`).
///
/// # Notes
/// Ends on the closing `}` without a trailing newline
fn write_struct_body(
    &mut self,
    handle: Handle<crate::Type>,
    members: &[crate::StructMember],
) -> BackendResult {
    writeln!(self.out, "{{")?;
    for (index, member) in members.iter().enumerate() {
        // Indentation is purely cosmetic
        write!(self.out, "{}", back::INDENT)?;
        if let TypeInner::Array {
            base,
            size,
            stride: _,
        } = self.module.types[member.ty].inner
        {
            // Arrays are declared as `base name[size];` so the element type
            // goes first and the size is appended after the member name
            self.write_type(base)?;
            write!(
                self.out,
                " {}",
                &self.names[&NameKey::StructMember(handle, index as u32)]
            )?;
            self.write_array_size(size)?;
            // The semicolon terminates the member declaration
            writeln!(self.out, ";")?;
        } else {
            // Every other member is a plain `type name;`
            // `write_type` adds no trailing space, hence the leading one below
            self.write_type(member.ty)?;
            // All members must end in a semicolon, including the last one
            writeln!(
                self.out,
                " {};",
                &self.names[&NameKey::StructMember(handle, index as u32)]
            )?;
        }
    }
    write!(self.out, "}}")?;
    Ok(())
}
/// Helper method used to write statements
///
/// `sta` is the statement to emit, `ctx` carries the per-function data
/// (expression arena, typifier info, cached names) and `level` is the
/// current indentation level.
///
/// # Notes
/// Always adds a newline
fn write_stmt(
    &mut self,
    sta: &crate::Statement,
    ctx: &back::FunctionCtx,
    level: back::Level,
) -> BackendResult {
    use crate::Statement;
    match *sta {
        // This is where we can generate intermediate constants for some expression types.
        Statement::Emit(ref range) => {
            for handle in range.clone() {
                let info = &ctx.info[handle];
                let ptr_class = info.ty.inner_with(&self.module.types).pointer_class();
                let expr_name = if ptr_class.is_some() {
                    // GLSL can't save a pointer-valued expression in a variable,
                    // but we shouldn't ever need to: they should never be named expressions,
                    // and none of the expression types flagged by bake_ref_count can be pointer-valued.
                    None
                } else if let Some(name) = ctx.named_expressions.get(&handle) {
                    // The front end provides names for all variables at the start of writing,
                    // but we emit them step by step, so the names must be re-cached here;
                    // otherwise we could accidentally write a variable name instead of the
                    // full expression. We also run them through the namer, which keeps the
                    // backend from emitting identifiers that collide with reserved keywords.
                    Some(self.namer.call(name))
                } else {
                    // Bake the expression into a named temporary when it is referenced
                    // at least as many times as its expression kind requires.
                    let min_ref_count = ctx.expressions[handle].bake_ref_count();
                    if min_ref_count <= info.ref_count {
                        Some(format!("{}{}", super::BAKE_PREFIX, handle.index()))
                    } else {
                        None
                    }
                };
                if let Some(name) = expr_name {
                    write!(self.out, "{}", level)?;
                    self.write_named_expr(handle, name, ctx)?;
                }
            }
        }
        // Blocks are simple we just need to write the block statements between braces
        // We could also just print the statements but this is more readable and maps more
        // closely to the IR
        Statement::Block(ref block) => {
            write!(self.out, "{}", level)?;
            writeln!(self.out, "{{")?;
            for sta in block.iter() {
                // Increase the indentation to help with readability
                self.write_stmt(sta, ctx, level.next())?
            }
            writeln!(self.out, "{}}}", level)?
        }
        // Ifs are written as in C:
        // ```
        // if(condition) {
        //     accept
        // } else {
        //     reject
        // }
        // ```
        Statement::If {
            condition,
            ref accept,
            ref reject,
        } => {
            write!(self.out, "{}", level)?;
            write!(self.out, "if (")?;
            self.write_expr(condition, ctx)?;
            writeln!(self.out, ") {{")?;
            for sta in accept {
                // Increase indentation to help with readability
                self.write_stmt(sta, ctx, level.next())?;
            }
            // If there are no statements in the reject block we skip writing it
            // This is only for readability
            if !reject.is_empty() {
                writeln!(self.out, "{}}} else {{", level)?;
                for sta in reject {
                    // Increase indentation to help with readability
                    self.write_stmt(sta, ctx, level.next())?;
                }
            }
            writeln!(self.out, "{}}}", level)?
        }
        // Switch are written as in C:
        // ```
        // switch (selector) {
        //      // Fallthrough
        //      case label:
        //          block
        //      // Non fallthrough
        //      case label:
        //          block
        //          break;
        //      default:
        //          block
        //  }
        //  ```
        //  Where the `default` case happens isn't important but we put it last
        //  so that we don't need to print a `break` for it
        Statement::Switch {
            selector,
            ref cases,
        } => {
            // Start the switch
            write!(self.out, "{}", level)?;
            write!(self.out, "switch(")?;
            self.write_expr(selector, ctx)?;
            writeln!(self.out, ") {{")?;
            // Case labels must carry the `u` suffix when the selector is unsigned,
            // so the literal type matches the switch expression type.
            let type_postfix = match *ctx.info[selector].ty.inner_with(&self.module.types) {
                crate::TypeInner::Scalar {
                    kind: crate::ScalarKind::Uint,
                    ..
                } => "u",
                _ => "",
            };
            // Write all cases
            let l2 = level.next();
            for case in cases {
                match case.value {
                    crate::SwitchValue::Integer(value) => {
                        writeln!(self.out, "{}case {}{}:", l2, value, type_postfix)?
                    }
                    crate::SwitchValue::Default => writeln!(self.out, "{}default:", l2)?,
                }
                for sta in case.body.iter() {
                    self.write_stmt(sta, ctx, l2.next())?;
                }
                // Write fallthrough comment if the case is fallthrough,
                // otherwise write a break, if the case is not already
                // broken out of at the end of its body.
                if case.fall_through {
                    writeln!(self.out, "{}/* fallthrough */", l2.next())?;
                } else if case.body.last().map_or(true, |s| !s.is_terminator()) {
                    writeln!(self.out, "{}break;", l2.next())?;
                }
            }
            writeln!(self.out, "{}}}", level)?
        }
        // Loops in naga IR are based on wgsl loops, glsl can emulate the behaviour by using a
        // while true loop and appending the continuing block to the body resulting on:
        // ```
        // bool loop_init = true;
        // while(true) {
        //     if (!loop_init) { <continuing> }
        //     loop_init = false;
        //     <body>
        // }
        // ```
        Statement::Loop {
            ref body,
            ref continuing,
        } => {
            if !continuing.is_empty() {
                // The gate variable skips the continuing block on the first
                // iteration only; `namer.call` keeps the name collision-free.
                let gate_name = self.namer.call("loop_init");
                writeln!(self.out, "{}bool {} = true;", level, gate_name)?;
                writeln!(self.out, "{}while(true) {{", level)?;
                writeln!(self.out, "{}if (!{}) {{", level.next(), gate_name)?;
                for sta in continuing {
                    self.write_stmt(sta, ctx, level.next())?;
                }
                writeln!(self.out, "{}}}", level.next())?;
                writeln!(self.out, "{}{} = false;", level.next(), gate_name)?;
            } else {
                // No continuing block: a plain infinite loop suffices
                writeln!(self.out, "{}while(true) {{", level)?;
            }
            for sta in body {
                self.write_stmt(sta, ctx, level.next())?;
            }
            writeln!(self.out, "{}}}", level)?
        }
        // Break, continue and return as written as in C
        // `break;`
        Statement::Break => {
            write!(self.out, "{}", level)?;
            writeln!(self.out, "break;")?
        }
        // `continue;`
        Statement::Continue => {
            write!(self.out, "{}", level)?;
            writeln!(self.out, "continue;")?
        }
        // `return expr;`, `expr` is optional
        Statement::Return { value } => {
            write!(self.out, "{}", level)?;
            match ctx.ty {
                back::FunctionType::Function(_) => {
                    write!(self.out, "return")?;
                    // Write the expression to be returned if needed
                    if let Some(expr) = value {
                        write!(self.out, " ")?;
                        self.write_expr(expr, ctx)?;
                    }
                    writeln!(self.out, ";")?;
                }
                back::FunctionType::EntryPoint(ep_index) => {
                    // Entry points can't return values in GLSL; instead every
                    // member of the result is assigned to its output varying
                    // before emitting a bare `return;`.
                    let ep = &self.module.entry_points[ep_index as usize];
                    if let Some(ref result) = ep.function.result {
                        let value = value.unwrap();
                        match self.module.types[result.ty].inner {
                            crate::TypeInner::Struct { ref members, .. } => {
                                // A `Compose` result is first stored in a temporary
                                // so its members can be read back individually;
                                // any other expression is re-emitted per member.
                                let temp_struct_name = match ctx.expressions[value] {
                                    crate::Expression::Compose { .. } => {
                                        let return_struct = "_tmp_return";
                                        write!(
                                            self.out,
                                            "{} {} = ",
                                            &self.names[&NameKey::Type(result.ty)],
                                            return_struct
                                        )?;
                                        self.write_expr(value, ctx)?;
                                        writeln!(self.out, ";")?;
                                        write!(self.out, "{}", level)?;
                                        Some(return_struct)
                                    }
                                    _ => None,
                                };
                                for (index, member) in members.iter().enumerate() {
                                    // TODO: handle builtin in better way
                                    if let Some(crate::Binding::BuiltIn(builtin)) =
                                        member.binding
                                    {
                                        // These built-ins are unavailable on `es`
                                        // profiles, so their writes are skipped.
                                        match builtin {
                                            crate::BuiltIn::ClipDistance
                                            | crate::BuiltIn::CullDistance
                                            | crate::BuiltIn::PointSize => {
                                                if self.options.version.is_es() {
                                                    continue;
                                                }
                                            }
                                            _ => {}
                                        }
                                    }
                                    let varying_name = VaryingName {
                                        binding: member.binding.as_ref().unwrap(),
                                        stage: ep.stage,
                                        output: true,
                                    };
                                    write!(self.out, "{} = ", varying_name)?;
                                    if let Some(struct_name) = temp_struct_name {
                                        write!(self.out, "{}", struct_name)?;
                                    } else {
                                        self.write_expr(value, ctx)?;
                                    }
                                    // Write field name
                                    writeln!(
                                        self.out,
                                        ".{};",
                                        &self.names
                                            [&NameKey::StructMember(result.ty, index as u32)]
                                    )?;
                                    write!(self.out, "{}", level)?;
                                }
                            }
                            _ => {
                                // Non-struct result: a single assignment to the
                                // output varying is enough.
                                let name = VaryingName {
                                    binding: result.binding.as_ref().unwrap(),
                                    stage: ep.stage,
                                    output: true,
                                };
                                write!(self.out, "{} = ", name)?;
                                self.write_expr(value, ctx)?;
                                writeln!(self.out, ";")?;
                                write!(self.out, "{}", level)?;
                            }
                        }
                    }
                    if let back::FunctionType::EntryPoint(ep_index) = ctx.ty {
                        // Flip Y and remap Z from [-1, 1] to [0, 1] when the user
                        // asked for coordinate-space adjustment (vertex stage only).
                        if self.module.entry_points[ep_index as usize].stage
                            == crate::ShaderStage::Vertex
                            && self
                                .options
                                .writer_flags
                                .contains(WriterFlags::ADJUST_COORDINATE_SPACE)
                        {
                            writeln!(
                                self.out,
                                "gl_Position.yz = vec2(-gl_Position.y, gl_Position.z * 2.0 - gl_Position.w);",
                            )?;
                            write!(self.out, "{}", level)?;
                        }
                    }
                    writeln!(self.out, "return;")?;
                }
            }
        }
        // This is one of the places were glsl adds to the syntax of C in this case the discard
        // keyword which ceases all further processing in a fragment shader, it's called OpKill
        // in spir-v that's why it's called `Statement::Kill`
        Statement::Kill => writeln!(self.out, "{}discard;", level)?,
        // Issue an execution or a memory barrier.
        Statement::Barrier(flags) => {
            if flags.is_empty() {
                writeln!(self.out, "{}barrier();", level)?;
            } else {
                writeln!(self.out, "{}groupMemoryBarrier();", level)?;
            }
        }
        // Stores in glsl are just variable assignments written as `pointer = value;`
        Statement::Store { pointer, value } => {
            write!(self.out, "{}", level)?;
            self.write_expr(pointer, ctx)?;
            write!(self.out, " = ")?;
            self.write_expr(value, ctx)?;
            writeln!(self.out, ";")?
        }
        // Stores a value into an image.
        Statement::ImageStore {
            image,
            coordinate,
            array_index,
            value,
        } => {
            write!(self.out, "{}", level)?;
            // This will only panic if the module is invalid
            let dim = match *ctx.info[image].ty.inner_with(&self.module.types) {
                TypeInner::Image { dim, .. } => dim,
                _ => unreachable!(),
            };
            // Image stores are always emitted as `imageStore(image, coords, value);`
            write!(self.out, "imageStore(")?;
            self.write_expr(image, ctx)?;
            write!(self.out, ", ")?;
            self.write_texture_coordinates(coordinate, array_index, dim, ctx)?;
            write!(self.out, ", ")?;
            self.write_expr(value, ctx)?;
            writeln!(self.out, ");")?;
        }
        // A `Call` is written `name(arguments)` where `arguments` is a comma separated expressions list
        Statement::Call {
            function,
            ref arguments,
            result,
        } => {
            write!(self.out, "{}", level)?;
            if let Some(expr) = result {
                // Bake the call result into a named temporary so later
                // expressions can reference it by name.
                let name = format!("{}{}", super::BAKE_PREFIX, expr.index());
                let result = self.module.functions[function].result.as_ref().unwrap();
                self.write_type(result.ty)?;
                write!(self.out, " {} = ", name)?;
                self.named_expressions.insert(expr, name);
            }
            write!(self.out, "{}(", &self.names[&NameKey::Function(function)])?;
            // Sampler arguments are filtered out here, mirroring how sampler
            // parameters are skipped when the function signature is written.
            let arguments: Vec<_> = arguments
                .iter()
                .enumerate()
                .filter_map(|(i, arg)| {
                    let arg_ty = self.module.functions[function].arguments[i].ty;
                    match self.module.types[arg_ty].inner {
                        TypeInner::Sampler { .. } => None,
                        _ => Some(*arg),
                    }
                })
                .collect();
            self.write_slice(&arguments, |this, _, arg| this.write_expr(*arg, ctx))?;
            writeln!(self.out, ");")?
        }
        Statement::Atomic {
            pointer,
            ref fun,
            value,
            result,
        } => {
            write!(self.out, "{}", level)?;
            // The atomic's old value is always baked into a named temporary.
            let res_name = format!("{}{}", super::BAKE_PREFIX, result.index());
            let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
            self.write_value_type(res_ty)?;
            write!(self.out, " {} = ", res_name)?;
            self.named_expressions.insert(result, res_name);
            // `to_glsl` maps the IR atomic function to the GLSL built-in
            // suffix, producing e.g. `atomicAdd`, `atomicAnd`, ...
            let fun_str = fun.to_glsl();
            write!(self.out, "atomic{}(", fun_str)?;
            self.write_expr(pointer, ctx)?;
            write!(self.out, ", ")?;
            // handle the special cases
            match *fun {
                crate::AtomicFunction::Subtract => {
                    // GLSL has no `atomicSub`; this is emitted as `atomicAdd`,
                    // so negate the argument to get subtraction
                    write!(self.out, "-")?;
                }
                crate::AtomicFunction::Exchange { compare: Some(_) } => {
                    return Err(Error::Custom(
                        "atomic CompareExchange is not implemented".to_string(),
                    ));
                }
                _ => {}
            }
            self.write_expr(value, ctx)?;
            writeln!(self.out, ");")?;
        }
    }
    Ok(())
}
/// Helper method to write expressions
///
/// # Notes
/// Doesn't add any newlines or leading/trailing spaces
fn write_expr(
&mut self,
expr: Handle<crate::Expression>,
ctx: &back::FunctionCtx<'_>,
) -> BackendResult {
use crate::Expression;
if let Some(name) = self.named_expressions.get(&expr) {
write!(self.out, "{}", name)?;
return Ok(());
}
match ctx.expressions[expr] {
// `Access` is applied to arrays, vectors and matrices and is written as indexing
Expression::Access { base, index } => {
self.write_expr(base, ctx)?;
write!(self.out, "[")?;
self.write_expr(index, ctx)?;
write!(self.out, "]")?
}
// `AccessIndex` is the same as `Access` except that the index is a constant and it can
// be applied to structs, in this case we need to find the name of the field at that
// index and write `base.field_name`
Expression::AccessIndex { base, index } => {
self.write_expr(base, ctx)?;
let base_ty_res = &ctx.info[base].ty;
let mut resolved = base_ty_res.inner_with(&self.module.types);
let base_ty_handle = match *resolved {
TypeInner::Pointer { base, class: _ } => {
resolved = &self.module.types[base].inner;
Some(base)
}
_ => base_ty_res.handle(),
};
match *resolved {
TypeInner::Vector { .. } => {
// Write vector access as a swizzle
write!(self.out, ".{}", back::COMPONENTS[index as usize])?
}
TypeInner::Matrix { .. }
| TypeInner::Array { .. }
| TypeInner::ValuePointer { .. } => write!(self.out, "[{}]", index)?,
TypeInner::Struct { .. } => {
// This will never panic in case the type is a `Struct`, this is not true
// for other types so we can only check while inside this match arm
let ty = base_ty_handle.unwrap();
write!(
self.out,
".{}",
&self.names[&NameKey::StructMember(ty, index)]
)?
}
ref other => return Err(Error::Custom(format!("Cannot index {:?}", other))),
}
}
// Constants are delegated to `write_constant`
Expression::Constant(constant) => self.write_constant(constant)?,
// `Splat` needs to actually write down a vector, it's not always inferred in GLSL.
Expression::Splat { size: _, value } => {
let resolved = ctx.info[expr].ty.inner_with(&self.module.types);
self.write_value_type(resolved)?;
write!(self.out, "(")?;
self.write_expr(value, ctx)?;
write!(self.out, ")")?
}
// `Swizzle` adds a few letters behind the dot.
Expression::Swizzle {
size,
vector,
pattern,
} => {
self.write_expr(vector, ctx)?;
write!(self.out, ".")?;
for &sc in pattern[..size as usize].iter() {
self.out.write_char(back::COMPONENTS[sc as usize])?;
}
}
// `Compose` is pretty simple we just write `type(components)` where `components` is a
// comma separated list of expressions
Expression::Compose { ty, ref components } => {
self.write_type(ty)?;
let resolved = ctx.info[expr].ty.inner_with(&self.module.types);
if let TypeInner::Array { size, .. } = *resolved {
self.write_array_size(size)?;
}
write!(self.out, "(")?;
self.write_slice(components, |this, _, arg| this.write_expr(*arg, ctx))?;
write!(self.out, ")")?
}
// Function arguments are written as the argument name
Expression::FunctionArgument(pos) => {
write!(self.out, "{}", &self.names[&ctx.argument_key(pos)])?
}
// Global variables need some special work for their name but
// `get_global_name` does the work for us
Expression::GlobalVariable(handle) => {
let global = &self.module.global_variables[handle];
self.write_global_name(handle, global)?
}
// A local is written as it's name
Expression::LocalVariable(handle) => {
write!(self.out, "{}", self.names[&ctx.name_key(handle)])?
}
// glsl has no pointers so there's no load operation, just write the pointer expression
Expression::Load { pointer } => self.write_expr(pointer, ctx)?,
// `ImageSample` is a bit complicated compared to the rest of the IR.
//
// First there are three variations depending wether the sample level is explicitly set,
// if it's automatic or it it's bias:
// `texture(image, coordinate)` - Automatic sample level
// `texture(image, coordinate, bias)` - Bias sample level
// `textureLod(image, coordinate, level)` - Zero or Exact sample level
//
// Furthermore if `depth_ref` is some we need to append it to the coordinate vector
Expression::ImageSample {
image,
sampler: _, //TODO?
coordinate,
array_index,
offset,
level,
depth_ref,
} => {
let dim = match *ctx.info[image].ty.inner_with(&self.module.types) {
TypeInner::Image { dim, .. } => dim,
_ => unreachable!(),
};
if dim == crate::ImageDimension::Cube
&& array_index.is_some()
&& depth_ref.is_some()
{
match level {
crate::SampleLevel::Zero
| crate::SampleLevel::Exact(_)
| crate::SampleLevel::Gradient { .. }
| crate::SampleLevel::Bias(_) => {
return Err(Error::Custom(String::from(
"gsamplerCubeArrayShadow isn't supported in textureGrad, \
textureLod or texture with bias",
)))
}
crate::SampleLevel::Auto => {}
}
}
// textureLod on sampler2DArrayShadow and samplerCubeShadow does not exist in GLSL.
// To emulate this, we will have to use textureGrad with a constant gradient of 0.
let workaround_lod_array_shadow_as_grad = (array_index.is_some()
|| dim == crate::ImageDimension::Cube)
&& depth_ref.is_some()
&& !self
.options
.writer_flags
.contains(WriterFlags::TEXTURE_SHADOW_LOD);
//Write the function to be used depending on the sample level
let fun_name = match level {
crate::SampleLevel::Auto | crate::SampleLevel::Bias(_) => "texture",
crate::SampleLevel::Zero | crate::SampleLevel::Exact(_) => {
if workaround_lod_array_shadow_as_grad {
"textureGrad"
} else {
"textureLod"
}
}
crate::SampleLevel::Gradient { .. } => "textureGrad",
};
let offset_name = match offset {
Some(_) => "Offset",
None => "",
};
write!(self.out, "{}{}(", fun_name, offset_name)?;
// Write the image that will be used
self.write_expr(image, ctx)?;
// The space here isn't required but it helps with readability
write!(self.out, ", ")?;
// We need to get the coordinates vector size to later build a vector that's `size + 1`
// if `depth_ref` is some, if it isn't a vector we panic as that's not a valid expression
let mut coord_dim = match *ctx.info[coordinate].ty.inner_with(&self.module.types) {
TypeInner::Vector { size, .. } => size as u8,
TypeInner::Scalar { .. } => 1,
_ => unreachable!(),
};
if array_index.is_some() {
coord_dim += 1;
}
let cube_array_shadow = coord_dim == 4;
if depth_ref.is_some() && !cube_array_shadow {
coord_dim += 1;
}
let tex_1d_hack = dim == crate::ImageDimension::D1 && self.options.version.is_es();
let is_vec = tex_1d_hack || coord_dim != 1;
// Compose a new texture coordinates vector
if is_vec {
write!(self.out, "vec{}(", coord_dim + tex_1d_hack as u8)?;
}
self.write_expr(coordinate, ctx)?;
if tex_1d_hack {
write!(self.out, ", 0.0")?;
}
if let Some(expr) = array_index {
write!(self.out, ", ")?;
self.write_expr(expr, ctx)?;
}
if !cube_array_shadow {
if let Some(expr) = depth_ref {
write!(self.out, ", ")?;
self.write_expr(expr, ctx)?;
}
}
if is_vec {
write!(self.out, ")")?;
}
if cube_array_shadow {
if let Some(expr) = depth_ref {
write!(self.out, ", ")?;
self.write_expr(expr, ctx)?;
}
}
match level {
// Auto needs no more arguments
crate::SampleLevel::Auto => (),
// Zero needs level set to 0
crate::SampleLevel::Zero => {
if workaround_lod_array_shadow_as_grad {
write!(self.out, ", vec2(0,0), vec2(0,0)")?;
} else {
write!(self.out, ", 0.0")?;
}
}
// Exact and bias require another argument
crate::SampleLevel::Exact(expr) => {
if workaround_lod_array_shadow_as_grad {
log::warn!("Unable to `textureLod` a shadow array, ignoring the LOD");
write!(self.out, ", vec2(0,0), vec2(0,0)")?;
} else {
write!(self.out, ", ")?;
self.write_expr(expr, ctx)?;
}
}
crate::SampleLevel::Bias(expr) => {
write!(self.out, ", ")?;
self.write_expr(expr, ctx)?;
}
crate::SampleLevel::Gradient { x, y } => {
write!(self.out, ", ")?;
self.write_expr(x, ctx)?;
write!(self.out, ", ")?;
self.write_expr(y, ctx)?;
}
}
if let Some(constant) = offset {
write!(self.out, ", ")?;
if tex_1d_hack {
write!(self.out, "ivec2(")?;
}
self.write_constant(constant)?;
if tex_1d_hack {
write!(self.out, ", 0)")?;
}
}
// End the function
write!(self.out, ")")?
}
// `ImageLoad` is also a bit complicated.
// There are two functions one for sampled
// images another for storage images, the former uses `texelFetch` and the latter uses
// `imageLoad`.
// Furthermore we have `index` which is always `Some` for sampled images
// and `None` for storage images, so we end up with two functions:
// `texelFetch(image, coordinate, index)` - for sampled images
// `imageLoad(image, coordinate)` - for storage images
Expression::ImageLoad {
image,
coordinate,
array_index,
index,
} => {
// This will only panic if the module is invalid
let (dim, class) = match *ctx.info[image].ty.inner_with(&self.module.types) {
TypeInner::Image {
dim,
arrayed: _,
class,
} => (dim, class),
_ => unreachable!(),
};
let fun_name = match class {
crate::ImageClass::Sampled { .. } => "texelFetch",
crate::ImageClass::Storage { .. } => "imageLoad",
// TODO: Is there even a function for this?
crate::ImageClass::Depth { multi: _ } => {
return Err(Error::Custom("TODO: depth sample loads".to_string()))
}
};
write!(self.out, "{}(", fun_name)?;
self.write_expr(image, ctx)?;
write!(self.out, ", ")?;
self.write_texture_coordinates(coordinate, array_index, dim, ctx)?;
if let Some(index_expr) = index {
write!(self.out, ", ")?;
self.write_expr(index_expr, ctx)?;
}
write!(self.out, ")")?;
}
// Query translates into one of the:
// - textureSize/imageSize
// - textureQueryLevels
// - textureSamples/imageSamples
Expression::ImageQuery { image, query } => {
use crate::ImageClass;
// This will only panic if the module is invalid
let (dim, class) = match *ctx.info[image].ty.inner_with(&self.module.types) {
TypeInner::Image {
dim,
arrayed: _,
class,
} => (dim, class),
_ => unreachable!(),
};
let components = match dim {
crate::ImageDimension::D1 => 1,
crate::ImageDimension::D2 => 2,
crate::ImageDimension::D3 => 3,
crate::ImageDimension::Cube => 2,
};
match query {
crate::ImageQuery::Size { level } => {
match class {
ImageClass::Sampled { .. } | ImageClass::Depth { .. } => {
write!(self.out, "textureSize(")?;
self.write_expr(image, ctx)?;
write!(self.out, ", ")?;
if let Some(expr) = level {
self.write_expr(expr, ctx)?;
} else {
write!(self.out, "0")?;
}
}
ImageClass::Storage { .. } => {
write!(self.out, "imageSize(")?;
self.write_expr(image, ctx)?;
}
}
write!(self.out, ")")?;
if components != 1 || self.options.version.is_es() {
write!(self.out, ".{}", &"xyz"[..components])?;
}
}
crate::ImageQuery::NumLevels => {
write!(self.out, "textureQueryLevels(",)?;
self.write_expr(image, ctx)?;
write!(self.out, ")",)?;
}
crate::ImageQuery::NumLayers => {
let fun_name = match class {
ImageClass::Sampled { .. } | ImageClass::Depth { .. } => "textureSize",
ImageClass::Storage { .. } => "imageSize",
};
write!(self.out, "{}(", fun_name)?;
self.write_expr(image, ctx)?;
if components != 1 || self.options.version.is_es() {
write!(self.out, ", 0).{}", back::COMPONENTS[components])?;
}
}
crate::ImageQuery::NumSamples => {
// assumes ARB_shader_texture_image_samples
let fun_name = match class {
ImageClass::Sampled { .. } | ImageClass::Depth { .. } => {
"textureSamples"
}
ImageClass::Storage { .. } => "imageSamples",
};
write!(self.out, "{}(", fun_name)?;
self.write_expr(image, ctx)?;
write!(self.out, ")",)?;
}
}
}
// `Unary` is pretty straightforward
// "-" - for `Negate`
// "~" - for `Not` if it's an integer
// "!" - for `Not` if it's a boolean
//
// We also wrap the everything in parentheses to avoid precedence issues
Expression::Unary { op, expr } => {
use crate::{ScalarKind as Sk, UnaryOperator as Uo};
write!(
self.out,
"({} ",
match op {
Uo::Negate => "-",
Uo::Not => match *ctx.info[expr].ty.inner_with(&self.module.types) {
TypeInner::Scalar { kind: Sk::Sint, .. } => "~",
TypeInner::Scalar { kind: Sk::Uint, .. } => "~",
TypeInner::Scalar { kind: Sk::Bool, .. } => "!",
ref other =>
return Err(Error::Custom(format!(
"Cannot apply not to type {:?}",
other
))),
},
}
)?;
self.write_expr(expr, ctx)?;
write!(self.out, ")")?
}
// `Binary` we just write `left op right`, except when dealing with
// comparison operations on vectors as they are implemented with
// builtin functions.
// Once again we wrap everything in parentheses to avoid precedence issues
Expression::Binary {
mut op,
left,
right,
} => {
// Holds `Some(function_name)` if the binary operation is
// implemented as a function call
use crate::{BinaryOperator as Bo, ScalarKind as Sk, TypeInner as Ti};
let left_inner = ctx.info[left].ty.inner_with(&self.module.types);
let right_inner = ctx.info[right].ty.inner_with(&self.module.types);
let function = match (left_inner, right_inner) {
(
&Ti::Vector {
kind: left_kind, ..
},
&Ti::Vector {
kind: right_kind, ..
},
) => match op {
Bo::Less
| Bo::LessEqual
| Bo::Greater
| Bo::GreaterEqual
| Bo::Equal
| Bo::NotEqual => BinaryOperation::VectorCompare,
Bo::Modulo => match (left_kind, right_kind) {
(Sk::Float, _) | (_, Sk::Float) => match op {
Bo::Modulo => BinaryOperation::Modulo,
_ => BinaryOperation::Other,
},
_ => BinaryOperation::Other,
},
_ => BinaryOperation::Other,
},
_ => match (left_inner.scalar_kind(), right_inner.scalar_kind()) {
(Some(Sk::Float), _) | (_, Some(Sk::Float)) => match op {
Bo::Modulo => BinaryOperation::Modulo,
_ => BinaryOperation::Other,
},
(Some(Sk::Bool), Some(Sk::Bool)) => match op {
Bo::InclusiveOr => {
op = crate::BinaryOperator::LogicalOr;
BinaryOperation::Other
}
Bo::And => {
op = crate::BinaryOperator::LogicalAnd;
BinaryOperation::Other
}
_ => BinaryOperation::Other,
},
_ => BinaryOperation::Other,
},
};
match function {
BinaryOperation::VectorCompare => {
let op_str = match op {
Bo::Less => "lessThan(",
Bo::LessEqual => "lessThanEqual(",
Bo::Greater => "greaterThan(",
Bo::GreaterEqual => "greaterThanEqual(",
Bo::Equal => "equal(",
Bo::NotEqual => "notEqual(",
_ => unreachable!(),
};
write!(self.out, "{}", op_str)?;
self.write_expr(left, ctx)?;
write!(self.out, ", ")?;
self.write_expr(right, ctx)?;
write!(self.out, ")")?;
}
BinaryOperation::Modulo => {
write!(self.out, "(")?;
// write `e1 - e2 * trunc(e1 / e2)`
self.write_expr(left, ctx)?;
write!(self.out, " - ")?;
self.write_expr(right, ctx)?;
write!(self.out, " * ")?;
write!(self.out, "trunc(")?;
self.write_expr(left, ctx)?;
write!(self.out, " / ")?;
self.write_expr(right, ctx)?;
write!(self.out, ")")?;
write!(self.out, ")")?;
}
BinaryOperation::Other => {
write!(self.out, "(")?;
self.write_expr(left, ctx)?;
write!(self.out, " {} ", super::binary_operation_str(op))?;
self.write_expr(right, ctx)?;
write!(self.out, ")")?;
}
}
}
// `Select` is written as `condition ? accept : reject`
// We wrap everything in parentheses to avoid precedence issues
Expression::Select {
condition,
accept,
reject,
} => {
let cond_ty = ctx.info[condition].ty.inner_with(&self.module.types);
let vec_select = if let TypeInner::Vector { .. } = *cond_ty {
true
} else {
false
};
// TODO: Boolean mix on desktop required GL_EXT_shader_integer_mix
if vec_select {
// Glsl defines that for mix when the condition is a boolean the first element
// is picked if condition is false and the second if condition is true
write!(self.out, "mix(")?;
self.write_expr(reject, ctx)?;
write!(self.out, ", ")?;
self.write_expr(accept, ctx)?;
write!(self.out, ", ")?;
self.write_expr(condition, ctx)?;
} else {
write!(self.out, "(")?;
self.write_expr(condition, ctx)?;
write!(self.out, " ? ")?;
self.write_expr(accept, ctx)?;
write!(self.out, " : ")?;
self.write_expr(reject, ctx)?;
}
write!(self.out, ")")?
}
// `Derivative` is a function call to a glsl provided function
Expression::Derivative { axis, expr } => {
use crate::DerivativeAxis as Da;
write!(
self.out,
"{}(",
match axis {
Da::X => "dFdx",
Da::Y => "dFdy",
Da::Width => "fwidth",
}
)?;
self.write_expr(expr, ctx)?;
write!(self.out, ")")?
}
// `Relational` is a normal function call to some glsl provided functions
Expression::Relational { fun, argument } => {
use crate::RelationalFunction as Rf;
let fun_name = match fun {
// There's no specific function for this but we can invert the result of `isinf`
Rf::IsFinite => "!isinf",
Rf::IsInf => "isinf",
Rf::IsNan => "isnan",
// There's also no function for this but we can invert `isnan`
Rf::IsNormal => "!isnan",
Rf::All => "all",
Rf::Any => "any",
};
write!(self.out, "{}(", fun_name)?;
self.write_expr(argument, ctx)?;
write!(self.out, ")")?
}
Expression::Math {
fun,
arg,
arg1,
arg2,
arg3,
} => {
use crate::MathFunction as Mf;
let fun_name = match fun {
// comparison
Mf::Abs => "abs",
Mf::Min => "min",
Mf::Max => "max",
Mf::Clamp => "clamp",
// trigonometry
Mf::Cos => "cos",
Mf::Cosh => "cosh",
Mf::Sin => "sin",
Mf::Sinh => "sinh",
Mf::Tan => "tan",
Mf::Tanh => "tanh",
Mf::Acos => "acos",
Mf::Asin => "asin",
Mf::Atan => "atan",
Mf::Asinh => "asinh",
Mf::Acosh => "acosh",
Mf::Atanh => "atanh",
// glsl doesn't have atan2 function
// use two-argument variation of the atan function
Mf::Atan2 => "atan",
// decomposition
Mf::Ceil => "ceil",
Mf::Floor => "floor",
Mf::Round => "roundEven",
Mf::Fract => "fract",
Mf::Trunc => "trunc",
Mf::Modf => "modf",
Mf::Frexp => "frexp",
Mf::Ldexp => "ldexp",
// exponent
Mf::Exp => "exp",
Mf::Exp2 => "exp2",
Mf::Log => "log",
Mf::Log2 => "log2",
Mf::Pow => "pow",
// geometry
Mf::Dot => "dot",
Mf::Outer => "outerProduct",
Mf::Cross => "cross",
Mf::Distance => "distance",
Mf::Length => "length",
Mf::Normalize => "normalize",
Mf::FaceForward => "faceforward",
Mf::Reflect => "reflect",
Mf::Refract => "refract",
// computational
Mf::Sign => "sign",
Mf::Fma => "fma",
Mf::Mix => "mix",
Mf::Step => "step",
Mf::SmoothStep => "smoothstep",
Mf::Sqrt => "sqrt",
Mf::InverseSqrt => "inversesqrt",
Mf::Inverse => "inverse",
Mf::Transpose => "transpose",
Mf::Determinant => "determinant",
// bits
Mf::CountOneBits => "bitCount",
Mf::ReverseBits => "bitfieldReverse",
Mf::ExtractBits => "bitfieldExtract",
Mf::InsertBits => "bitfieldInsert",
// data packing
Mf::Pack4x8snorm => "packSnorm4x8",
Mf::Pack4x8unorm => "packUnorm4x8",
Mf::Pack2x16snorm => "packSnorm2x16",
Mf::Pack2x16unorm => "packUnorm2x16",
Mf::Pack2x16float => "packHalf2x16",
// data unpacking
Mf::Unpack4x8snorm => "unpackSnorm4x8",
Mf::Unpack4x8unorm => "unpackUnorm4x8",
Mf::Unpack2x16snorm => "unpackSnorm2x16",
Mf::Unpack2x16unorm => "unpackUnorm2x16",
Mf::Unpack2x16float => "unpackHalf2x16",
};
let extract_bits = fun == Mf::ExtractBits;
let insert_bits = fun == Mf::InsertBits;
write!(self.out, "{}(", fun_name)?;
self.write_expr(arg, ctx)?;
if let Some(arg) = arg1 {
write!(self.out, ", ")?;
if extract_bits {
write!(self.out, "int(")?;
self.write_expr(arg, ctx)?;
write!(self.out, ")")?;
} else {
self.write_expr(arg, ctx)?;
}
}
if let Some(arg) = arg2 {
write!(self.out, ", ")?;
if extract_bits || insert_bits {
write!(self.out, "int(")?;
self.write_expr(arg, ctx)?;
write!(self.out, ")")?;
} else {
self.write_expr(arg, ctx)?;
}
}
if let Some(arg) = arg3 {
write!(self.out, ", ")?;
if insert_bits {
write!(self.out, "int(")?;
self.write_expr(arg, ctx)?;
write!(self.out, ")")?;
} else {
self.write_expr(arg, ctx)?;
}
}
write!(self.out, ")")?
}
// `As` is always a call.
// If `convert` is true the function name is the type
// Else the function name is one of the glsl provided bitcast functions
Expression::As {
expr,
kind: target_kind,
convert,
} => {
let inner = ctx.info[expr].ty.inner_with(&self.module.types);
match convert {
Some(width) => {
// this is similar to `write_type`, but with the target kind
let scalar = glsl_scalar(target_kind, width)?;
match *inner {
TypeInner::Vector { size, .. } => {
write!(self.out, "{}vec{}", scalar.prefix, size as u8)?
}
_ => write!(self.out, "{}", scalar.full)?,
}
write!(self.out, "(")?;
self.write_expr(expr, ctx)?;
write!(self.out, ")")?
}
None => {
use crate::ScalarKind as Sk;
let source_kind = inner.scalar_kind().unwrap();
let conv_op = match (source_kind, target_kind) {
(Sk::Float, Sk::Sint) => "floatBitsToInt",
(Sk::Float, Sk::Uint) => "floatBitsToUInt",
(Sk::Sint, Sk::Float) => "intBitsToFloat",
(Sk::Uint, Sk::Float) => "uintBitsToFloat",
// There is no way to bitcast between Uint/Sint in glsl. Use constructor conversion
(Sk::Uint, Sk::Sint) => "int",
(Sk::Sint, Sk::Uint) => "uint",
(Sk::Bool, Sk::Sint) => "int",
(Sk::Bool, Sk::Uint) => "uint",
(Sk::Bool, Sk::Float) => "float",
(Sk::Sint, Sk::Bool) => "bool",
(Sk::Uint, Sk::Bool) => "bool",
(Sk::Float, Sk::Bool) => "bool",
// No conversion needed
(Sk::Sint, Sk::Sint) => "",
(Sk::Uint, Sk::Uint) => "",
(Sk::Float, Sk::Float) => "",
(Sk::Bool, Sk::Bool) => "",
};
write!(self.out, "{}", conv_op)?;
if !conv_op.is_empty() {
write!(self.out, "(")?;
}
self.write_expr(expr, ctx)?;
if !conv_op.is_empty() {
write!(self.out, ")")?
}
}
}
}
// These expressions never show up in `Emit`.
Expression::CallResult(_) | Expression::AtomicResult { .. } => unreachable!(),
// `ArrayLength` is written as `expr.length()` and we convert it to a uint
Expression::ArrayLength(expr) => {
write!(self.out, "uint(")?;
self.write_expr(expr, ctx)?;
write!(self.out, ".length())")?
}
}
Ok(())
}
/// Writes a texture coordinate expression, folding in the array layer when present.
///
/// With an `array_index` the coordinate and the layer are packed into a single
/// `ivecN` constructor (one extra component for the layer). Without one, the
/// coordinate is written as-is, except that 1D textures on ES — which this
/// backend emulates with 2D textures — get a zero second component appended.
fn write_texture_coordinates(
    &mut self,
    coordinate: Handle<crate::Expression>,
    array_index: Option<Handle<crate::Expression>>,
    dim: crate::ImageDimension,
    ctx: &back::FunctionCtx,
) -> Result<(), Error> {
    use crate::ImageDimension as IDim;
    if let Some(layer) = array_index {
        // One component wider than the coordinate itself, to hold the layer.
        let ctor = match dim {
            IDim::D1 => "ivec2",
            IDim::D2 => "ivec3",
            IDim::D3 | IDim::Cube => "ivec4",
        };
        write!(self.out, "{}(", ctor)?;
        self.write_expr(coordinate, ctx)?;
        write!(self.out, ", ")?;
        self.write_expr(layer, ctx)?;
        write!(self.out, ")")?;
    } else {
        // ES has no 1D textures; pad the coordinate for the emulated 2D one.
        let pad_1d = dim == IDim::D1 && self.options.version.is_es();
        if pad_1d {
            write!(self.out, "ivec2(")?;
        }
        self.write_expr(coordinate, ctx)?;
        if pad_1d {
            write!(self.out, ", 0.0)")?;
        }
    }
    Ok(())
}
/// Writes `<type> <name>[<size>] = <expr>;` for a baked expression and records
/// `name` so that later uses of `handle` emit the name instead of re-emitting
/// the whole expression.
fn write_named_expr(
    &mut self,
    handle: Handle<crate::Expression>,
    name: String,
    ctx: &back::FunctionCtx,
) -> BackendResult {
    // Structs must be referred to by their generated name; all other types go
    // through the regular type writers.
    match ctx.info[handle].ty {
        proc::TypeResolution::Handle(ty_handle) => match self.module.types[ty_handle].inner {
            TypeInner::Struct { .. } => {
                let ty_name = &self.names[&NameKey::Type(ty_handle)];
                write!(self.out, "{}", ty_name)?;
            }
            _ => {
                self.write_type(ty_handle)?;
            }
        },
        proc::TypeResolution::Value(ref inner) => {
            self.write_value_type(inner)?;
        }
    }
    let base_ty_res = &ctx.info[handle].ty;
    let resolved = base_ty_res.inner_with(&self.module.types);
    write!(self.out, " {}", name)?;
    // In GLSL the array size is spelled after the variable name.
    if let TypeInner::Array { size, .. } = *resolved {
        self.write_array_size(size)?;
    }
    write!(self.out, " = ")?;
    self.write_expr(handle, ctx)?;
    writeln!(self.out, ";")?;
    // Remember the binding so subsequent expression writes reuse the name.
    self.named_expressions.insert(handle, name);
    Ok(())
}
/// Writes a zero-valued initializer for `ty`.
///
/// Only scalars, vectors and matrices are handled; any other type is silently
/// skipped for now (TODO carried over from the original implementation).
fn write_zero_init_value(&mut self, ty: Handle<crate::Type>) -> BackendResult {
    let inner = &self.module.types[ty].inner;
    match *inner {
        TypeInner::Scalar { kind, .. } => {
            self.write_zero_init_scalar(kind)?;
        }
        TypeInner::Vector { size, kind, .. } => {
            // `vecN(0, 0, ..., 0)` with comma-separated components.
            self.write_value_type(inner)?;
            write!(self.out, "(")?;
            for i in 0..size as usize {
                if i != 0 {
                    write!(self.out, ", ")?;
                }
                self.write_zero_init_scalar(kind)?;
            }
            write!(self.out, ")")?;
        }
        TypeInner::Matrix { columns, rows, .. } => {
            let component_count = (columns as usize) * (rows as usize);
            self.write_value_type(inner)?;
            write!(self.out, "(")?;
            for i in 0..component_count {
                if i != 0 {
                    write!(self.out, ", ")?;
                }
                // The IR supports only float matrices.
                self.write_zero_init_scalar(crate::ScalarKind::Float)?;
            }
            write!(self.out, ")")?;
        }
        // TODO: arrays, structs, pointers, ...
        _ => {}
    }
    Ok(())
}
/// Writes the zero literal for a single scalar of the given kind.
fn write_zero_init_scalar(&mut self, kind: crate::ScalarKind) -> BackendResult {
    let literal = match kind {
        crate::ScalarKind::Bool => "false",
        crate::ScalarKind::Uint => "0u",
        crate::ScalarKind::Float => "0.0",
        crate::ScalarKind::Sint => "0",
    };
    write!(self.out, "{}", literal)?;
    Ok(())
}
/// Writes the GLSL memory qualifiers corresponding to `storage_access`.
///
/// GLSL allows combining `readonly` and `writeonly`, but that combination only
/// permits querying information about the resource, which is not what we want —
/// so an access that is both `LOAD` and `STORE` emits no qualifier at all.
fn write_storage_access(&mut self, storage_access: crate::StorageAccess) -> BackendResult {
    let can_store = storage_access.contains(crate::StorageAccess::STORE);
    let can_load = storage_access.contains(crate::StorageAccess::LOAD);
    if !can_store {
        write!(self.out, "readonly ")?;
    }
    if !can_load {
        write!(self.out, "writeonly ")?;
    }
    Ok(())
}
/// Helper method used to produce the reflection info that's returned to the user
///
/// For the current entry point this collects (a) every texture together with
/// the sampler it is used with, and (b) the names of the uniform/storage
/// buffer globals the shader actually uses.
///
/// # Errors
/// Returns [`Error::ImageMultipleSamplers`] if one texture is sampled with two
/// different samplers.
fn collect_reflection_info(&self) -> Result<ReflectionInfo, Error> {
    use std::collections::hash_map::Entry;
    let info = self.info.get_entry_point(self.entry_point_idx as usize);
    let mut texture_mapping = crate::FastHashMap::default();
    let mut uniforms = crate::FastHashMap::default();
    // First pass: record each sampled texture with its sampler.
    for sampling in info.sampling_set.iter() {
        let tex_name = self.reflection_names_globals[&sampling.image].clone();
        match texture_mapping.entry(tex_name) {
            Entry::Vacant(v) => {
                v.insert(TextureMapping {
                    texture: sampling.image,
                    sampler: Some(sampling.sampler),
                });
            }
            Entry::Occupied(e) => {
                // A texture may only be paired with a single sampler.
                if e.get().sampler != Some(sampling.sampler) {
                    log::error!("Conflicting samplers for {}", e.key());
                    return Err(Error::ImageMultipleSamplers);
                }
            }
        }
    }
    // Second pass: walk the globals, skipping any the entry point never uses.
    for (handle, var) in self.module.global_variables.iter() {
        if info[handle].is_empty() {
            continue;
        }
        match self.module.types[var.ty].inner {
            crate::TypeInner::Struct { .. } => match var.class {
                crate::StorageClass::Uniform | crate::StorageClass::Storage { .. } => {
                    let name = self.reflection_names_globals[&handle].clone();
                    uniforms.insert(handle, name);
                }
                _ => (),
            },
            crate::TypeInner::Image { .. } => {
                // Images used without a sampler still get a mapping entry.
                let tex_name = self.reflection_names_globals[&handle].clone();
                match texture_mapping.entry(tex_name) {
                    Entry::Vacant(v) => {
                        v.insert(TextureMapping {
                            texture: handle,
                            sampler: None,
                        });
                    }
                    Entry::Occupied(_) => {
                        // already used with a sampler, do nothing
                    }
                }
            }
            _ => {}
        }
    }
    Ok(ReflectionInfo {
        texture_mapping,
        uniforms,
    })
}
}
/// Structure returned by [`glsl_scalar`](glsl_scalar)
///
/// It contains both a prefix used in other types and the full type name
/// (e.g. a 4-byte float has prefix `""` and full name `"float"`, so a vector
/// of it is spelled `vec4` while the scalar itself is `float`).
struct ScalarString<'a> {
    /// The prefix used to compose other types (e.g. the `i` in `ivec2`)
    prefix: &'a str,
    /// The name of the scalar type
    full: &'a str,
}
/// Helper function that returns scalar related strings
///
/// Check [`ScalarString`](ScalarString) for the information provided
///
/// # Errors
/// If a [`Float`](crate::ScalarKind::Float) with an width that isn't 4 or 8
fn glsl_scalar(
kind: crate::ScalarKind,
width: crate::Bytes,
) -> Result<ScalarString<'static>, Error> {
use crate::ScalarKind as Sk;
Ok(match kind {
Sk::Sint => ScalarString {
prefix: "i",
full: "int",
},
Sk::Uint => ScalarString {
prefix: "u",
full: "uint",
},
Sk::Float => match width {
4 => ScalarString {
prefix: "",
full: "float",
},
8 => ScalarString {
prefix: "d",
full: "double",
},
_ => return Err(Error::UnsupportedScalar(kind, width)),
},
Sk::Bool => ScalarString {
prefix: "b",
full: "bool",
},
})
}
/// Helper function that returns the glsl variable name for a builtin
///
/// `output` selects the direction-dependent names: `Position` is written as
/// `gl_Position` on output and read as `gl_FragCoord`, and `SampleMask` is
/// `gl_SampleMask` on output vs `gl_SampleMaskIn` on input.
fn glsl_built_in(built_in: crate::BuiltIn, output: bool) -> &'static str {
    use crate::BuiltIn as Bi;
    match built_in {
        Bi::Position => {
            if output {
                "gl_Position"
            } else {
                "gl_FragCoord"
            }
        }
        Bi::ViewIndex => "gl_ViewIndex",
        // vertex
        // Several GL builtins below are signed ints, hence the `uint(...)`
        // casts around them.
        Bi::BaseInstance => "uint(gl_BaseInstance)",
        Bi::BaseVertex => "uint(gl_BaseVertex)",
        Bi::ClipDistance => "gl_ClipDistance",
        Bi::CullDistance => "gl_CullDistance",
        Bi::InstanceIndex => "uint(gl_InstanceID)",
        Bi::PointSize => "gl_PointSize",
        Bi::VertexIndex => "uint(gl_VertexID)",
        // fragment
        Bi::FragDepth => "gl_FragDepth",
        Bi::FrontFacing => "gl_FrontFacing",
        Bi::PrimitiveIndex => "uint(gl_PrimitiveID)",
        Bi::SampleIndex => "gl_SampleID",
        Bi::SampleMask => {
            if output {
                "gl_SampleMask"
            } else {
                "gl_SampleMaskIn"
            }
        }
        // compute
        Bi::GlobalInvocationId => "gl_GlobalInvocationID",
        Bi::LocalInvocationId => "gl_LocalInvocationID",
        Bi::LocalInvocationIndex => "gl_LocalInvocationIndex",
        Bi::WorkGroupId => "gl_WorkGroupID",
        Bi::WorkGroupSize => "gl_WorkGroupSize",
        Bi::NumWorkGroups => "gl_NumWorkGroups",
    }
}
/// Returns the GLSL storage qualifier for `class`, or `None` for classes that
/// are not spelled with a qualifier.
fn glsl_storage_class(class: crate::StorageClass) -> Option<&'static str> {
    use crate::StorageClass as Sc;
    match class {
        Sc::Function | Sc::Private | Sc::PushConstant => None,
        Sc::Storage { .. } => Some("buffer"),
        Sc::Uniform | Sc::Handle => Some("uniform"),
        Sc::WorkGroup => Some("shared"),
    }
}
/// Helper function that returns the string corresponding to the glsl interpolation qualifier
fn glsl_interpolation(interpolation: crate::Interpolation) -> &'static str {
    use crate::Interpolation as I;
    match interpolation {
        I::Perspective => "smooth",
        I::Linear => "noperspective",
        I::Flat => "flat",
    }
}

/// Return the GLSL auxiliary qualifier for the given sampling value.
///
/// `Center` maps to `None`: it is expressed by the absence of a qualifier.
fn glsl_sampling(sampling: crate::Sampling) -> Option<&'static str> {
    use crate::Sampling as S;
    match sampling {
        S::Center => None,
        S::Centroid => Some("centroid"),
        S::Sample => Some("sample"),
    }
}

/// Helper function that returns the glsl dimension string of [`ImageDimension`](crate::ImageDimension)
/// (the suffix used in sampler/image type names, e.g. the `2D` in `sampler2D`).
fn glsl_dimension(dim: crate::ImageDimension) -> &'static str {
    use crate::ImageDimension as IDim;
    match dim {
        IDim::D1 => "1D",
        IDim::D2 => "2D",
        IDim::D3 => "3D",
        IDim::Cube => "Cube",
    }
}
/// Helper function that returns the glsl storage format string of [`StorageFormat`](crate::StorageFormat)
///
/// These are the image format layout qualifiers (`layout(<format>)`) defined
/// by GLSL for image variables.
fn glsl_storage_format(format: crate::StorageFormat) -> &'static str {
    use crate::StorageFormat as Sf;
    match format {
        Sf::R8Unorm => "r8",
        Sf::R8Snorm => "r8_snorm",
        Sf::R8Uint => "r8ui",
        Sf::R8Sint => "r8i",
        Sf::R16Uint => "r16ui",
        Sf::R16Sint => "r16i",
        Sf::R16Float => "r16f",
        Sf::Rg8Unorm => "rg8",
        Sf::Rg8Snorm => "rg8_snorm",
        Sf::Rg8Uint => "rg8ui",
        Sf::Rg8Sint => "rg8i",
        Sf::R32Uint => "r32ui",
        Sf::R32Sint => "r32i",
        Sf::R32Float => "r32f",
        Sf::Rg16Uint => "rg16ui",
        Sf::Rg16Sint => "rg16i",
        Sf::Rg16Float => "rg16f",
        // Fixed: this previously emitted "rgba8ui", which is the *unsigned
        // integer* qualifier (already produced for `Rgba8Uint` below); the
        // normalized 8-bit RGBA qualifier is "rgba8".
        Sf::Rgba8Unorm => "rgba8",
        Sf::Rgba8Snorm => "rgba8_snorm",
        Sf::Rgba8Uint => "rgba8ui",
        Sf::Rgba8Sint => "rgba8i",
        // Fixed: "rgb10_a2ui" is the qualifier for the uint variant; the
        // unorm variant is "rgb10_a2".
        Sf::Rgb10a2Unorm => "rgb10_a2",
        Sf::Rg11b10Float => "r11f_g11f_b10f",
        Sf::Rg32Uint => "rg32ui",
        Sf::Rg32Sint => "rg32i",
        Sf::Rg32Float => "rg32f",
        Sf::Rgba16Uint => "rgba16ui",
        Sf::Rgba16Sint => "rgba16i",
        Sf::Rgba16Float => "rgba16f",
        Sf::Rgba32Uint => "rgba32ui",
        Sf::Rgba32Sint => "rgba32i",
        Sf::Rgba32Float => "rgba32f",
    }
}
/// Returns `true` for the types a zero-value initializer can be written for:
/// scalars, vectors and matrices.
fn is_value_init_supported(module: &crate::Module, ty: Handle<crate::Type>) -> bool {
    matches!(
        module.types[ty].inner,
        TypeInner::Scalar { .. } | TypeInner::Vector { .. } | TypeInner::Matrix { .. }
    )
}
| 40.933045 | 141 | 0.462513 |
33381de4163718d289c77401371919b7a90cc772 | 369 | extern crate tinysegmenter;
#[test]
fn tokenize() {
    // A plain Japanese sentence segments into words and particles.
    assert_eq!(
        tinysegmenter::tokenize("私の名前は中野です"),
        ["私", "の", "名前", "は", "中野", "です"]);
    // Mixed ASCII/Japanese input: the latin run stays one token while the
    // digits are split individually.
    assert_eq!(
        tinysegmenter::tokenize("TinySegmenterは25kBで書かれています。"),
        ["TinySegmenter", "は", "2", "5", "kB", "で", "書か", "れ", "て", "い", "ます", "。"]);
    // Empty input yields no tokens.
    assert_eq!(tinysegmenter::tokenize(""), [] as [&str; 0]);
}
| 24.6 | 81 | 0.552846 |
39704e2f3dad90d3e95df7cec554ce620a18ed19 | 2,621 | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use futures::channel::mpsc::TrySendError;
use std::{error, fmt, io, num, result};
use tokio_util::codec::LinesCodecError;
pub type Result<T> = result::Result<T, Error>;
pub fn none_error<E>(error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
Error::NoneError(error.into())
}
/// Unified error type wrapping every failure source this crate deals with.
#[derive(Debug)]
pub enum Error {
    /// I/O failure.
    Io(io::Error),
    /// Failure to send on an mpsc channel (boxed, since the payload type varies).
    TrySendError(Box<dyn error::Error + Send>),
    /// JSON (de)serialization failure.
    SerdeJson(serde_json::Error),
    /// Line-based codec failure.
    LinesCodecError(LinesCodecError),
    /// libzfs call failure.
    LibZfsError(libzfs_types::LibZfsError),
    /// Integer parsing failure.
    ParseIntError(num::ParseIntError),
    /// A value that was required to be present was missing (see `none_error`).
    NoneError(Box<dyn error::Error + Send + Sync>),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Io(ref err) => write!(f, "{}", err),
Error::TrySendError(ref err) => write!(f, "{}", err),
Error::SerdeJson(ref err) => write!(f, "{}", err),
Error::LinesCodecError(ref err) => write!(f, "{}", err),
Error::LibZfsError(ref err) => write!(f, "{}", err),
Error::ParseIntError(ref err) => write!(f, "{}", err),
Error::NoneError(ref err) => write!(f, "{}", err),
}
}
}
impl error::Error for Error {
    /// Returns the underlying error, if any.
    ///
    /// Implemented as `source` (the previous implementation provided only the
    /// deprecated `cause`, which left `source()` returning `None` and broke
    /// `source()`-based error-chain walking). `cause()` still yields the same
    /// values through its default implementation, which forwards to `source`.
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match *self {
            Error::Io(ref err) => Some(err),
            // Boxed `dyn Error + Send` lacks the bounds to be returned here.
            Error::TrySendError(_) => None,
            Error::SerdeJson(ref err) => Some(err),
            Error::LinesCodecError(ref err) => Some(err),
            Error::LibZfsError(ref err) => Some(err),
            Error::ParseIntError(ref err) => Some(err),
            Error::NoneError(_) => None,
        }
    }
}
/// Conversions that let `?` lift each underlying failure into [`Error`].
impl From<io::Error> for Error {
    fn from(err: io::Error) -> Self {
        Self::Io(err)
    }
}

impl From<LinesCodecError> for Error {
    fn from(err: LinesCodecError) -> Self {
        Self::LinesCodecError(err)
    }
}

impl<E> From<TrySendError<E>> for Error
where
    E: Send + 'static,
{
    fn from(err: TrySendError<E>) -> Self {
        // Boxed because the channel payload type `E` varies per call site.
        Self::TrySendError(Box::new(err))
    }
}

impl From<serde_json::Error> for Error {
    fn from(err: serde_json::Error) -> Self {
        Self::SerdeJson(err)
    }
}

impl From<libzfs_types::LibZfsError> for Error {
    fn from(err: libzfs_types::LibZfsError) -> Self {
        Self::LibZfsError(err)
    }
}

impl From<num::ParseIntError> for Error {
    fn from(err: num::ParseIntError) -> Self {
        Self::ParseIntError(err)
    }
}
| 27.589474 | 68 | 0.585654 |
4af99d5014352f633f5acd80f17c7df8c8308f9e | 5,640 | use std::borrow::Cow;
use std::cell::RefCell;
use std::rc::Rc;
use deno_core::error::type_error;
use deno_core::error::AnyError;
use deno_core::ZeroCopyBuf;
use deno_core::{CancelFuture, Resource};
use deno_core::{CancelHandle, OpState};
use deno_core::{RcRef, ResourceId};
use serde::Deserialize;
use serde::Serialize;
use tokio::sync::mpsc::unbounded_channel;
use tokio::sync::mpsc::UnboundedReceiver;
use tokio::sync::mpsc::UnboundedSender;
/// A value that can be moved between ports along with a message.
/// Currently only message ports themselves are transferable.
enum Transferable {
    MessagePort(MessagePort),
}

/// Payload sent over the channel: serialized message bytes plus any
/// transferred values.
type MessagePortMessage = (Vec<u8>, Vec<Transferable>);

/// One end of an entangled pair of ports. `rx` receives from the paired port;
/// `tx` sends to it (`None` once disentangled).
pub struct MessagePort {
    rx: RefCell<UnboundedReceiver<MessagePortMessage>>,
    tx: RefCell<Option<UnboundedSender<MessagePortMessage>>>,
}

impl MessagePort {
    /// Sends `data` (bytes plus transferables) to the paired port.
    ///
    /// Transferables are first resolved out of the resource table; that step
    /// can fail and is propagated. The channel send itself is best-effort.
    pub fn send(
        &self,
        state: &mut OpState,
        data: JsMessageData,
    ) -> Result<(), AnyError> {
        let transferables =
            deserialize_js_transferables(state, data.transferables)?;

        // Swallow the failed to send error. It means the channel was disentangled,
        // but not cleaned up.
        if let Some(tx) = &*self.tx.borrow() {
            tx.send((data.data.to_vec(), transferables)).ok();
        }

        Ok(())
    }

    /// Awaits the next message, re-registering any transferred values in the
    /// resource table. Returns `Ok(None)` once the channel is closed.
    pub async fn recv(
        &self,
        state: Rc<RefCell<OpState>>,
    ) -> Result<Option<JsMessageData>, AnyError> {
        // Only one receive may be in flight per port; a second concurrent
        // caller gets a type error instead of a RefCell panic.
        let mut rx = self
            .rx
            .try_borrow_mut()
            .map_err(|_| type_error("Port receiver is already borrowed"))?;
        if let Some((data, transferables)) = rx.recv().await {
            let js_transferables =
                serialize_transferables(&mut state.borrow_mut(), transferables);
            return Ok(Some(JsMessageData {
                data: ZeroCopyBuf::from(data),
                transferables: js_transferables,
            }));
        }
        Ok(None)
    }

    /// This forcefully disconnects the message port from its paired port. This
    /// will wake up the `.recv` on the paired port, which will return `Ok(None)`.
    pub fn disentangle(&self) {
        let mut tx = self.tx.borrow_mut();
        tx.take();
    }
}
/// Builds a pair of ports wired to each other: whatever one sends, the other
/// receives.
pub fn create_entangled_message_port() -> (MessagePort, MessagePort) {
    // Two unbounded channels, crossed over between the two ports.
    let (a_to_b, from_a) = unbounded_channel::<MessagePortMessage>();
    let (b_to_a, from_b) = unbounded_channel::<MessagePortMessage>();
    (
        MessagePort {
            rx: RefCell::new(from_b),
            tx: RefCell::new(Some(a_to_b)),
        },
        MessagePort {
            rx: RefCell::new(from_a),
            tx: RefCell::new(Some(b_to_a)),
        },
    )
}
/// Resource-table wrapper around a [`MessagePort`], carrying a cancel handle
/// that interrupts pending receives when the resource is closed.
pub struct MessagePortResource {
    port: MessagePort,
    cancel: CancelHandle,
}

impl Resource for MessagePortResource {
    fn name(&self) -> Cow<str> {
        Cow::Borrowed("messagePort")
    }

    fn close(self: Rc<Self>) {
        // Wake any in-flight `recv` on this port.
        self.cancel.cancel();
    }
}
/// Op: creates an entangled port pair, registers both in the resource table
/// and returns their resource ids.
pub fn op_message_port_create_entangled(
    state: &mut OpState,
    _: (),
    _: (),
) -> Result<(ResourceId, ResourceId), AnyError> {
    let (port1, port2) = create_entangled_message_port();

    let mut register = |port| {
        state.resource_table.add(MessagePortResource {
            port,
            cancel: CancelHandle::new(),
        })
    };
    let rid1 = register(port1);
    let rid2 = register(port2);

    Ok((rid1, rid2))
}
/// Serde representation of a transferable as exchanged with JS, tagged as
/// `{ "kind": ..., "data": ... }`.
#[derive(Deserialize, Serialize)]
#[serde(tag = "kind", content = "data", rename_all = "camelCase")]
pub enum JsTransferable {
    #[serde(rename_all = "camelCase")]
    MessagePort(ResourceId),
}

/// Resolves JS-side transferable descriptors into owned [`Transferable`]s,
/// removing each referenced port from the resource table.
///
/// Fails if a resource id is invalid, or if the port resource is still
/// referenced elsewhere and therefore cannot be uniquely owned for transfer.
fn deserialize_js_transferables(
    state: &mut OpState,
    js_transferables: Vec<JsTransferable>,
) -> Result<Vec<Transferable>, AnyError> {
    let mut transferables = Vec::with_capacity(js_transferables.len());
    for js_transferable in js_transferables {
        match js_transferable {
            JsTransferable::MessagePort(id) => {
                // `take`, not `get`: transferring consumes the resource entry.
                let resource = state
                    .resource_table
                    .take::<MessagePortResource>(id)
                    .map_err(|_| type_error("Invalid message port transfer"))?;
                // Abort any pending recv on the port being transferred.
                resource.cancel.cancel();
                // `take` can leave other strong references alive; transfer
                // requires exclusive ownership of the port.
                let resource = Rc::try_unwrap(resource)
                    .map_err(|_| type_error("Message port is not ready for transfer"))?;
                transferables.push(Transferable::MessagePort(resource.port));
            }
        }
    }
    Ok(transferables)
}
/// Registers received transferables in the resource table and returns their
/// JS-side descriptors, preserving order.
fn serialize_transferables(
    state: &mut OpState,
    transferables: Vec<Transferable>,
) -> Vec<JsTransferable> {
    transferables
        .into_iter()
        .map(|transferable| match transferable {
            Transferable::MessagePort(port) => {
                let rid = state.resource_table.add(MessagePortResource {
                    port,
                    cancel: CancelHandle::new(),
                });
                JsTransferable::MessagePort(rid)
            }
        })
        .collect()
}
/// A message as exchanged with JS: the serialized payload bytes plus the list
/// of transferred values.
#[derive(Deserialize, Serialize)]
pub struct JsMessageData {
    data: ZeroCopyBuf,
    transferables: Vec<JsTransferable>,
}

/// Op: posts `data` to the port identified by `rid`.
///
/// An attempt to transfer the destination port inside its own message is
/// rejected before any resource is touched.
pub fn op_message_port_post_message(
    state: &mut OpState,
    rid: ResourceId,
    data: JsMessageData,
) -> Result<(), AnyError> {
    for js_transferable in &data.transferables {
        match js_transferable {
            JsTransferable::MessagePort(id) => {
                if *id == rid {
                    return Err(type_error("Can not transfer self message port"));
                }
            }
        }
    }

    let resource = state.resource_table.get::<MessagePortResource>(rid)?;
    resource.port.send(state, data)
}

/// Op: awaits the next message on port `rid`.
///
/// Returns `Ok(None)` when the resource no longer exists; a pending await is
/// interrupted through the resource's cancel handle when it is closed.
pub async fn op_message_port_recv_message(
    state: Rc<RefCell<OpState>>,
    rid: ResourceId,
    _: (),
) -> Result<Option<JsMessageData>, AnyError> {
    let resource = {
        // Borrow the op state only long enough to look up the resource.
        let state = state.borrow();
        match state.resource_table.get::<MessagePortResource>(rid) {
            Ok(resource) => resource,
            Err(_) => return Ok(None),
        }
    };
    let cancel = RcRef::map(resource.clone(), |r| &r.cancel);
    resource.port.recv(state).or_cancel(cancel).await?
}
| 26.35514 | 80 | 0.669504 |
e93bc86a10380d4a83d9f9cd39e1b0555948a0e1 | 6,303 | //! Unary iterator on bit vectors.
use crate::bit_vector::WORD_LEN;
use crate::broadword;
use crate::BitVector;
/// Iterator for enumerating positions of set bits, created by [`BitVector::unary_iter`].
pub struct UnaryIter<'a> {
    // Bit vector being scanned.
    bv: &'a BitVector,
    // Current absolute bit position.
    pos: usize,
    // Word containing `pos`, with the bits below `pos % WORD_LEN` masked off.
    buf: usize,
}

impl<'a> UnaryIter<'a> {
    /// Creates the iterator from the given bit position.
    pub fn new(bv: &'a BitVector, pos: usize) -> Self {
        // Load the word holding `pos` and clear the bits before `pos`.
        let buf =
            bv.words()[pos / WORD_LEN] & (usize::max_value().wrapping_shl((pos % WORD_LEN) as u32));
        Self { bv, pos, buf }
    }

    /// Gets the current bit position.
    #[inline(always)]
    pub const fn position(&self) -> usize {
        self.pos
    }

    /// Skips to the `k`-th one after the current position.
    ///
    /// # Examples
    ///
    /// ```
    /// use sucds::BitVector;
    ///
    /// let bv = BitVector::from_bits([false, true, false, false, true, true]);
    /// let mut it = bv.unary_iter(0);
    ///
    /// assert_eq!(it.skip1(0), Some(1));
    /// assert_eq!(it.skip1(1), Some(4));
    /// assert_eq!(it.skip1(2), None);
    /// ```
    #[inline(always)]
    pub fn skip1(&mut self, k: usize) -> Option<usize> {
        let mut skipped = 0;
        let mut buf = self.buf;
        // Walk word by word, counting set bits, until the word containing the
        // (k - skipped)-th remaining one is found.
        loop {
            let w = broadword::popcount(buf);
            if skipped + w > k {
                break;
            }
            skipped += w;
            self.pos += WORD_LEN;
            let word_pos = self.pos / WORD_LEN;
            if self.bv.num_words() <= word_pos {
                return None;
            }
            buf = self.bv.words()[word_pos];
        }
        debug_assert!(buf != 0);
        // Locate the target bit inside the word, then keep only it and the
        // bits above it in the buffer for the next call.
        let pos_in_word = broadword::select_in_word(buf, k - skipped);
        self.buf = buf & usize::max_value().wrapping_shl(pos_in_word as u32);
        self.pos = (self.pos & !(WORD_LEN - 1)) + pos_in_word;
        Some(self.pos)
    }

    /// Skips to the `k`-th zero after the current position.
    ///
    /// # Examples
    ///
    /// ```
    /// use sucds::BitVector;
    ///
    /// let bv = BitVector::from_bits([false, true, false, false, true, true]);
    /// let mut it = bv.unary_iter(0);
    ///
    /// assert_eq!(it.skip0(0), Some(0));
    /// assert_eq!(it.skip0(1), Some(2));
    /// assert_eq!(it.skip0(2), None);
    /// ```
    #[inline(always)]
    pub fn skip0(&mut self, k: usize) -> Option<usize> {
        let mut skipped = 0;
        let pos_in_word = self.pos % WORD_LEN;
        // Work on the complemented word so zeros appear as ones, masking off
        // the bits before the current position.
        let mut buf = !self.buf & usize::max_value().wrapping_shl(pos_in_word as u32);
        loop {
            let w = broadword::popcount(buf);
            if skipped + w > k {
                break;
            }
            skipped += w;
            self.pos += WORD_LEN;
            let word_pos = self.pos / WORD_LEN;
            if self.bv.num_words() <= word_pos {
                return None;
            }
            buf = !self.bv.words()[word_pos];
        }
        debug_assert!(buf != 0);
        let pos_in_word = broadword::select_in_word(buf, k - skipped);
        // Store the un-complemented view back into the buffer.
        self.buf = !buf & usize::max_value().wrapping_shl(pos_in_word as u32);
        self.pos = (self.pos & !(WORD_LEN - 1)) + pos_in_word;
        // The last word may extend past the logical length; reject positions
        // beyond it.
        Some(self.pos).filter(|&x| x < self.bv.len())
    }
}
impl<'a> Iterator for UnaryIter<'a> {
    type Item = usize;

    /// Returns the position of the next set bit, or `None` at the end.
    #[inline(always)]
    fn next(&mut self) -> Option<Self::Item> {
        let mut buf = self.buf;
        // Skip over whole words with no remaining set bits.
        while buf == 0 {
            self.pos += WORD_LEN;
            let word_pos = self.pos / WORD_LEN;
            if self.bv.num_words() <= word_pos {
                return None;
            }
            buf = self.bv.words()[word_pos];
        }
        let pos_in_word = broadword::lsb(buf).unwrap();
        self.buf = buf & (buf - 1); // clear LSB
        self.pos = (self.pos & !(WORD_LEN - 1)) + pos_in_word;
        Some(self.pos)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    use rand::{Rng, SeedableRng};
    use rand_chacha::ChaChaRng;

    /// Generates `len` random bits (each set with probability `p`) plus a
    /// random start position, deterministically from `seed`.
    fn gen_random_bits(len: usize, p: f64, seed: u64) -> (Vec<bool>, usize) {
        let mut rng = ChaChaRng::seed_from_u64(seed);
        let bits = (0..len).map(|_| rng.gen_bool(p)).collect();
        let pos = rng.gen_range(0..len);
        (bits, pos)
    }

    /// Checks that iterating from `pos` yields exactly the set-bit positions.
    fn test_unary_iter(bits: &[bool], pos: usize) {
        let bv = BitVector::from_bits(bits.iter().cloned());
        let mut expected = vec![];
        for (i, &b) in bits[pos..].iter().enumerate() {
            if b {
                expected.push(pos + i);
            }
        }
        let mut it = bv.unary_iter(pos);
        for &ex in &expected {
            assert_eq!(it.next(), Some(ex));
        }
        assert_eq!(it.next(), None);
    }

    /// Cross-checks `skip1(2)` against two consecutive `successor1` calls.
    fn test_skip1(bits: &[bool], mut pos: usize) {
        let bv = BitVector::from_bits(bits.iter().cloned());
        pos = bv.successor1(pos).unwrap();
        let mut it = bv.unary_iter(pos);
        while let Some(i) = it.skip1(2) {
            pos = bv.successor1(pos + 1).unwrap();
            pos = bv.successor1(pos + 1).unwrap();
            assert_eq!(i, pos);
        }
    }

    /// Cross-checks `skip0(2)` against two consecutive `successor0` calls.
    fn test_skip0(bits: &[bool], mut pos: usize) {
        let bv = BitVector::from_bits(bits.iter().cloned());
        pos = bv.successor0(pos).unwrap();
        let mut it = bv.unary_iter(pos);
        while let Some(i) = it.skip0(2) {
            pos = bv.successor0(pos + 1).unwrap();
            pos = bv.successor0(pos + 1).unwrap();
            assert_eq!(i, pos);
        }
    }

    // Densities 0.5 / 0.1 / 0.9 exercise the word-skipping loops differently.
    #[test]
    fn test_random_bits() {
        for seed in 0..100 {
            let (bits, pos) = gen_random_bits(10000, 0.5, seed);
            test_unary_iter(&bits, pos);
            test_skip1(&bits, pos);
            test_skip0(&bits, pos);
        }
    }

    #[test]
    fn test_sparse_random_bits() {
        for seed in 0..100 {
            let (bits, pos) = gen_random_bits(10000, 0.1, seed);
            test_unary_iter(&bits, pos);
            test_skip1(&bits, pos);
            test_skip0(&bits, pos);
        }
    }

    #[test]
    fn test_dense_random_bits() {
        for seed in 0..100 {
            let (bits, pos) = gen_random_bits(10000, 0.9, seed);
            test_unary_iter(&bits, pos);
            test_skip1(&bits, pos);
            test_skip0(&bits, pos);
        }
    }
}
| 30.014286 | 100 | 0.510709 |
bb8d90e55347263fc893fdc2c4f459881a8984b0 | 20,659 | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files.git)
// DO NOT EDIT
use crate::Accessible;
use crate::AccessibleProperty;
use crate::AccessibleRelation;
use crate::AccessibleRole;
use crate::AccessibleState;
use crate::DebugFlags;
use crate::PageSetup;
use crate::PrintSettings;
#[cfg(any(target_os = "linux", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(target_os = "linux")))]
use crate::Printer;
use crate::StyleContext;
use crate::TextDirection;
use crate::TreeModel;
use crate::TreePath;
use crate::Widget;
use crate::Window;
use glib::object::IsA;
use glib::translate::*;
use std::boxed::Box as Box_;
use std::mem;
use std::ptr;
/// Thin safe wrapper over `gtk_accelerator_get_default_mod_mask`: the modifier
/// mask GTK treats as significant for accelerators.
#[doc(alias = "gtk_accelerator_get_default_mod_mask")]
pub fn accelerator_get_default_mod_mask() -> gdk::ModifierType {
    assert_initialized_main_thread!();
    unsafe { from_glib(ffi::gtk_accelerator_get_default_mod_mask()) }
}

/// Returns a user-displayable label for the given key/modifier combination
/// (wraps `gtk_accelerator_get_label`).
#[doc(alias = "gtk_accelerator_get_label")]
pub fn accelerator_get_label(
    accelerator_key: u32,
    accelerator_mods: gdk::ModifierType,
) -> Option<glib::GString> {
    assert_initialized_main_thread!();
    unsafe {
        from_glib_full(ffi::gtk_accelerator_get_label(
            accelerator_key,
            accelerator_mods.to_glib(),
        ))
    }
}

/// Like [`accelerator_get_label`], additionally taking a hardware `keycode`
/// and an optional display (wraps `gtk_accelerator_get_label_with_keycode`).
#[doc(alias = "gtk_accelerator_get_label_with_keycode")]
pub fn accelerator_get_label_with_keycode(
    display: Option<&gdk::Display>,
    accelerator_key: u32,
    keycode: u32,
    accelerator_mods: gdk::ModifierType,
) -> Option<glib::GString> {
    assert_initialized_main_thread!();
    unsafe {
        from_glib_full(ffi::gtk_accelerator_get_label_with_keycode(
            display.to_glib_none().0,
            accelerator_key,
            keycode,
            accelerator_mods.to_glib(),
        ))
    }
}

/// Returns the parseable name of an accelerator (wraps `gtk_accelerator_name`).
#[doc(alias = "gtk_accelerator_name")]
pub fn accelerator_name(
    accelerator_key: u32,
    accelerator_mods: gdk::ModifierType,
) -> Option<glib::GString> {
    assert_initialized_main_thread!();
    unsafe {
        from_glib_full(ffi::gtk_accelerator_name(
            accelerator_key,
            accelerator_mods.to_glib(),
        ))
    }
}

/// Like [`accelerator_name`], additionally taking a hardware `keycode` and an
/// optional display (wraps `gtk_accelerator_name_with_keycode`).
#[doc(alias = "gtk_accelerator_name_with_keycode")]
pub fn accelerator_name_with_keycode(
    display: Option<&gdk::Display>,
    accelerator_key: u32,
    keycode: u32,
    accelerator_mods: gdk::ModifierType,
) -> Option<glib::GString> {
    assert_initialized_main_thread!();
    unsafe {
        from_glib_full(ffi::gtk_accelerator_name_with_keycode(
            display.to_glib_none().0,
            accelerator_key,
            keycode,
            accelerator_mods.to_glib(),
        ))
    }
}

/// Parses an accelerator string (wraps `gtk_accelerator_parse`), returning the
/// key and modifier mask, or `None` when the C call reports failure.
#[doc(alias = "gtk_accelerator_parse")]
pub fn accelerator_parse(accelerator: &str) -> Option<(u32, gdk::ModifierType)> {
    assert_initialized_main_thread!();
    unsafe {
        // Out-parameters filled in by the C call.
        let mut accelerator_key = mem::MaybeUninit::uninit();
        let mut accelerator_mods = mem::MaybeUninit::uninit();
        let ret = from_glib(ffi::gtk_accelerator_parse(
            accelerator.to_glib_none().0,
            accelerator_key.as_mut_ptr(),
            accelerator_mods.as_mut_ptr(),
        ));
        let accelerator_key = accelerator_key.assume_init();
        let accelerator_mods = accelerator_mods.assume_init();
        if ret {
            Some((accelerator_key, from_glib(accelerator_mods)))
        } else {
            None
        }
    }
}

//#[doc(alias = "gtk_accelerator_parse_with_keycode")]
//pub fn accelerator_parse_with_keycode(accelerator: &str, display: Option<&gdk::Display>, accelerator_codes: Vec<u32>) -> Option<(u32, gdk::ModifierType)> {
//    unsafe { TODO: call ffi:gtk_accelerator_parse_with_keycode() }
//}
/// Checks the linked GTK library against the required version
/// (wraps `gtk_check_version`); the returned string, if any, describes the
/// mismatch reported by the C call.
#[doc(alias = "gtk_check_version")]
pub fn check_version(
    required_major: u32,
    required_minor: u32,
    required_micro: u32,
) -> Option<glib::GString> {
    skip_assert_initialized!();
    unsafe {
        from_glib_none(ffi::gtk_check_version(
            required_major,
            required_minor,
            required_micro,
        ))
    }
}

/// Returns the error-domain quark for CSS parser warnings
/// (wraps `gtk_css_parser_warning_quark`).
#[doc(alias = "gtk_css_parser_warning_quark")]
pub fn css_parser_warning_quark() -> glib::Quark {
    assert_initialized_main_thread!();
    unsafe { from_glib(ffi::gtk_css_parser_warning_quark()) }
}

/// Prevents GTK from calling `setlocale` during initialization
/// (wraps `gtk_disable_setlocale`); must run before GTK is initialized.
#[doc(alias = "gtk_disable_setlocale")]
pub fn disable_setlocale() {
    assert_not_initialized!();
    unsafe {
        ffi::gtk_disable_setlocale();
    }
}

//#[doc(alias = "gtk_distribute_natural_allocation")]
//pub fn distribute_natural_allocation(extra_space: i32, sizes: /*Ignored*/&[&RequestedSize]) -> i32 {
//    unsafe { TODO: call ffi:gtk_distribute_natural_allocation() }
//}
#[cfg(any(target_os = "linux", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(target_os = "linux")))]
// Safe binding for gtk_enumerate_printers. The Rust closure is boxed and
// passed as user data; `func_func` is the C-ABI trampoline that unboxes and
// invokes it, and `destroy_func` reclaims the box when GTK drops the callback.
#[doc(alias = "gtk_enumerate_printers")]
pub fn enumerate_printers<P: Fn(&Printer) -> bool + Send + Sync + 'static>(func: P, wait: bool) {
    assert_initialized_main_thread!();
    let func_data: Box_<P> = Box_::new(func);
    // Trampoline: called by C once per printer; returning true stops enumeration
    // on the C side (the bool is converted to gboolean).
    unsafe extern "C" fn func_func<P: Fn(&Printer) -> bool + Send + Sync + 'static>(
        printer: *mut ffi::GtkPrinter,
        data: glib::ffi::gpointer,
    ) -> glib::ffi::gboolean {
        let printer = from_glib_borrow(printer);
        let callback: &P = &*(data as *mut _);
        let res = (*callback)(&printer);
        res.to_glib()
    }
    let func = Some(func_func::<P> as _);
    // Destroy notify: rebuilds the Box from the raw pointer so the closure is
    // dropped exactly once, when GTK releases the user data.
    unsafe extern "C" fn destroy_func<P: Fn(&Printer) -> bool + Send + Sync + 'static>(
        data: glib::ffi::gpointer,
    ) {
        let _callback: Box_<P> = Box_::from_raw(data as *mut _);
    }
    let destroy_call2 = Some(destroy_func::<P> as _);
    let super_callback0: Box_<P> = func_data;
    unsafe {
        ffi::gtk_enumerate_printers(
            func,
            Box_::into_raw(super_callback0) as *mut _,
            destroy_call2,
            wait.to_glib(),
        );
    }
}
// Binary age of the linked GTK library (libtool versioning).
#[doc(alias = "gtk_get_binary_age")]
pub fn get_binary_age() -> u32 {
    skip_assert_initialized!();
    unsafe { ffi::gtk_get_binary_age() }
}

// Currently active GTK debug flags.
#[doc(alias = "gtk_get_debug_flags")]
pub fn get_debug_flags() -> DebugFlags {
    assert_initialized_main_thread!();
    unsafe { from_glib(ffi::gtk_get_debug_flags()) }
}

// Default language as a pango::Language (borrowed from GTK, hence
// from_glib_none).
#[doc(alias = "gtk_get_default_language")]
pub fn get_default_language() -> Option<pango::Language> {
    assert_initialized_main_thread!();
    unsafe { from_glib_none(ffi::gtk_get_default_language()) }
}

// Interface age of the linked GTK library (libtool versioning).
#[doc(alias = "gtk_get_interface_age")]
pub fn get_interface_age() -> u32 {
    skip_assert_initialized!();
    unsafe { ffi::gtk_get_interface_age() }
}

// Text direction implied by the current locale.
#[doc(alias = "gtk_get_locale_direction")]
pub fn get_locale_direction() -> TextDirection {
    assert_initialized_main_thread!();
    unsafe { from_glib(ffi::gtk_get_locale_direction()) }
}

// Major version number of the GTK library linked at runtime.
#[doc(alias = "gtk_get_major_version")]
pub fn get_major_version() -> u32 {
    skip_assert_initialized!();
    unsafe { ffi::gtk_get_major_version() }
}

// Micro version number of the GTK library linked at runtime.
#[doc(alias = "gtk_get_micro_version")]
pub fn get_micro_version() -> u32 {
    skip_assert_initialized!();
    unsafe { ffi::gtk_get_micro_version() }
}

// Minor version number of the GTK library linked at runtime.
#[doc(alias = "gtk_get_minor_version")]
pub fn get_minor_version() -> u32 {
    skip_assert_initialized!();
    unsafe { ffi::gtk_get_minor_version() }
}
// Safe binding for gtk_hsv_to_rgb: converts an HSV triple to RGB. The C call
// fills three out-parameters, written via MaybeUninit and read back once the
// FFI call has initialized them.
#[doc(alias = "gtk_hsv_to_rgb")]
pub fn hsv_to_rgb(h: f32, s: f32, v: f32) -> (f32, f32, f32) {
    assert_initialized_main_thread!();
    unsafe {
        let mut r = mem::MaybeUninit::uninit();
        let mut g = mem::MaybeUninit::uninit();
        let mut b = mem::MaybeUninit::uninit();
        ffi::gtk_hsv_to_rgb(h, s, v, r.as_mut_ptr(), g.as_mut_ptr(), b.as_mut_ptr());
        let r = r.assume_init();
        let g = g.assume_init();
        let b = b.assume_init();
        (r, g, b)
    }
}
// Safe binding for gtk_print_run_page_setup_dialog: runs the (blocking) page
// setup dialog and returns the resulting PageSetup (ownership transferred,
// hence from_glib_full).
#[doc(alias = "gtk_print_run_page_setup_dialog")]
pub fn print_run_page_setup_dialog<P: IsA<Window>>(
    parent: Option<&P>,
    page_setup: Option<&PageSetup>,
    settings: &PrintSettings,
) -> Option<PageSetup> {
    skip_assert_initialized!();
    unsafe {
        from_glib_full(ffi::gtk_print_run_page_setup_dialog(
            parent.map(|p| p.as_ref()).to_glib_none().0,
            page_setup.to_glib_none().0,
            settings.to_glib_none().0,
        ))
    }
}

// Async variant: the done callback is boxed and handed to C as user data;
// `done_cb_func` is the one-shot trampoline that reclaims the box (FnOnce)
// and invokes the Rust closure with the resulting PageSetup.
#[doc(alias = "gtk_print_run_page_setup_dialog_async")]
pub fn print_run_page_setup_dialog_async<
    P: IsA<Window>,
    Q: FnOnce(&PageSetup) + Send + Sync + 'static,
>(
    parent: Option<&P>,
    page_setup: Option<&PageSetup>,
    settings: &PrintSettings,
    done_cb: Q,
) {
    skip_assert_initialized!();
    let done_cb_data: Box_<Q> = Box_::new(done_cb);
    unsafe extern "C" fn done_cb_func<
        P: IsA<Window>,
        Q: FnOnce(&PageSetup) + Send + Sync + 'static,
    >(
        page_setup: *mut ffi::GtkPageSetup,
        data: glib::ffi::gpointer,
    ) {
        let page_setup = from_glib_borrow(page_setup);
        // Rebuild the Box so the FnOnce is consumed and dropped exactly once.
        let callback: Box_<Q> = Box_::from_raw(data as *mut _);
        (*callback)(&page_setup);
    }
    let done_cb = Some(done_cb_func::<P, Q> as _);
    let super_callback0: Box_<Q> = done_cb_data;
    unsafe {
        ffi::gtk_print_run_page_setup_dialog_async(
            parent.map(|p| p.as_ref()).to_glib_none().0,
            page_setup.to_glib_none().0,
            settings.to_glib_none().0,
            done_cb,
            Box_::into_raw(super_callback0) as *mut _,
        );
    }
}
// The render_* bindings below all follow the same generated pattern: borrow
// the style context, cast away cairo::Context constness (mut_override — the C
// API takes a mutable cr), and delegate to the corresponding ffi call.

// Delegates to ffi::gtk_render_activity over the given rectangle.
#[doc(alias = "gtk_render_activity")]
pub fn render_activity<P: IsA<StyleContext>>(
    context: &P,
    cr: &cairo::Context,
    x: f64,
    y: f64,
    width: f64,
    height: f64,
) {
    skip_assert_initialized!();
    unsafe {
        ffi::gtk_render_activity(
            context.as_ref().to_glib_none().0,
            mut_override(cr.to_glib_none().0),
            x,
            y,
            width,
            height,
        );
    }
}

// Delegates to ffi::gtk_render_arrow; `angle` is in radians per the C API —
// TODO confirm against GTK docs.
#[doc(alias = "gtk_render_arrow")]
pub fn render_arrow<P: IsA<StyleContext>>(
    context: &P,
    cr: &cairo::Context,
    angle: f64,
    x: f64,
    y: f64,
    size: f64,
) {
    skip_assert_initialized!();
    unsafe {
        ffi::gtk_render_arrow(
            context.as_ref().to_glib_none().0,
            mut_override(cr.to_glib_none().0),
            angle,
            x,
            y,
            size,
        );
    }
}

// Delegates to ffi::gtk_render_background over the given rectangle.
#[doc(alias = "gtk_render_background")]
pub fn render_background<P: IsA<StyleContext>>(
    context: &P,
    cr: &cairo::Context,
    x: f64,
    y: f64,
    width: f64,
    height: f64,
) {
    skip_assert_initialized!();
    unsafe {
        ffi::gtk_render_background(
            context.as_ref().to_glib_none().0,
            mut_override(cr.to_glib_none().0),
            x,
            y,
            width,
            height,
        );
    }
}

// Delegates to ffi::gtk_render_check over the given rectangle.
#[doc(alias = "gtk_render_check")]
pub fn render_check<P: IsA<StyleContext>>(
    context: &P,
    cr: &cairo::Context,
    x: f64,
    y: f64,
    width: f64,
    height: f64,
) {
    skip_assert_initialized!();
    unsafe {
        ffi::gtk_render_check(
            context.as_ref().to_glib_none().0,
            mut_override(cr.to_glib_none().0),
            x,
            y,
            width,
            height,
        );
    }
}

// Delegates to ffi::gtk_render_expander over the given rectangle.
#[doc(alias = "gtk_render_expander")]
pub fn render_expander<P: IsA<StyleContext>>(
    context: &P,
    cr: &cairo::Context,
    x: f64,
    y: f64,
    width: f64,
    height: f64,
) {
    skip_assert_initialized!();
    unsafe {
        ffi::gtk_render_expander(
            context.as_ref().to_glib_none().0,
            mut_override(cr.to_glib_none().0),
            x,
            y,
            width,
            height,
        );
    }
}

// Delegates to ffi::gtk_render_focus over the given rectangle.
#[doc(alias = "gtk_render_focus")]
pub fn render_focus<P: IsA<StyleContext>>(
    context: &P,
    cr: &cairo::Context,
    x: f64,
    y: f64,
    width: f64,
    height: f64,
) {
    skip_assert_initialized!();
    unsafe {
        ffi::gtk_render_focus(
            context.as_ref().to_glib_none().0,
            mut_override(cr.to_glib_none().0),
            x,
            y,
            width,
            height,
        );
    }
}
// Delegates to ffi::gtk_render_frame over the given rectangle.
#[doc(alias = "gtk_render_frame")]
pub fn render_frame<P: IsA<StyleContext>>(
    context: &P,
    cr: &cairo::Context,
    x: f64,
    y: f64,
    width: f64,
    height: f64,
) {
    skip_assert_initialized!();
    unsafe {
        ffi::gtk_render_frame(
            context.as_ref().to_glib_none().0,
            mut_override(cr.to_glib_none().0),
            x,
            y,
            width,
            height,
        );
    }
}

// Delegates to ffi::gtk_render_handle over the given rectangle.
#[doc(alias = "gtk_render_handle")]
pub fn render_handle<P: IsA<StyleContext>>(
    context: &P,
    cr: &cairo::Context,
    x: f64,
    y: f64,
    width: f64,
    height: f64,
) {
    skip_assert_initialized!();
    unsafe {
        ffi::gtk_render_handle(
            context.as_ref().to_glib_none().0,
            mut_override(cr.to_glib_none().0),
            x,
            y,
            width,
            height,
        );
    }
}

// Delegates to ffi::gtk_render_icon, drawing the given gdk::Texture at (x, y).
#[doc(alias = "gtk_render_icon")]
pub fn render_icon<P: IsA<StyleContext>, Q: IsA<gdk::Texture>>(
    context: &P,
    cr: &cairo::Context,
    texture: &Q,
    x: f64,
    y: f64,
) {
    skip_assert_initialized!();
    unsafe {
        ffi::gtk_render_icon(
            context.as_ref().to_glib_none().0,
            mut_override(cr.to_glib_none().0),
            texture.as_ref().to_glib_none().0,
            x,
            y,
        );
    }
}

// Delegates to ffi::gtk_render_layout, drawing the pango::Layout at (x, y).
#[doc(alias = "gtk_render_layout")]
pub fn render_layout<P: IsA<StyleContext>>(
    context: &P,
    cr: &cairo::Context,
    x: f64,
    y: f64,
    layout: &pango::Layout,
) {
    skip_assert_initialized!();
    unsafe {
        ffi::gtk_render_layout(
            context.as_ref().to_glib_none().0,
            mut_override(cr.to_glib_none().0),
            x,
            y,
            layout.to_glib_none().0,
        );
    }
}

// Delegates to ffi::gtk_render_line from (x0, y0) to (x1, y1).
#[doc(alias = "gtk_render_line")]
pub fn render_line<P: IsA<StyleContext>>(
    context: &P,
    cr: &cairo::Context,
    x0: f64,
    y0: f64,
    x1: f64,
    y1: f64,
) {
    skip_assert_initialized!();
    unsafe {
        ffi::gtk_render_line(
            context.as_ref().to_glib_none().0,
            mut_override(cr.to_glib_none().0),
            x0,
            y0,
            x1,
            y1,
        );
    }
}

// Delegates to ffi::gtk_render_option over the given rectangle.
#[doc(alias = "gtk_render_option")]
pub fn render_option<P: IsA<StyleContext>>(
    context: &P,
    cr: &cairo::Context,
    x: f64,
    y: f64,
    width: f64,
    height: f64,
) {
    skip_assert_initialized!();
    unsafe {
        ffi::gtk_render_option(
            context.as_ref().to_glib_none().0,
            mut_override(cr.to_glib_none().0),
            x,
            y,
            width,
            height,
        );
    }
}
// Safe binding for gtk_rgb_to_hsv: inverse of hsv_to_rgb above; the three
// out-parameters are written by the FFI call before assume_init.
#[doc(alias = "gtk_rgb_to_hsv")]
pub fn rgb_to_hsv(r: f32, g: f32, b: f32) -> (f32, f32, f32) {
    assert_initialized_main_thread!();
    unsafe {
        let mut h = mem::MaybeUninit::uninit();
        let mut s = mem::MaybeUninit::uninit();
        let mut v = mem::MaybeUninit::uninit();
        ffi::gtk_rgb_to_hsv(r, g, b, h.as_mut_ptr(), s.as_mut_ptr(), v.as_mut_ptr());
        let h = h.assume_init();
        let s = s.assume_init();
        let v = v.assume_init();
        (h, s, v)
    }
}

// Replaces the process-wide GTK debug flags.
#[doc(alias = "gtk_set_debug_flags")]
pub fn set_debug_flags(flags: DebugFlags) {
    assert_initialized_main_thread!();
    unsafe {
        ffi::gtk_set_debug_flags(flags.to_glib());
    }
}

// Safe binding for gtk_show_uri: asks GTK to open `uri`, optionally parented
// to `parent`; `timestamp` is the event time forwarded to the C call.
#[doc(alias = "gtk_show_uri")]
pub fn show_uri<P: IsA<Window>>(parent: Option<&P>, uri: &str, timestamp: u32) {
    assert_initialized_main_thread!();
    unsafe {
        ffi::gtk_show_uri(
            parent.map(|p| p.as_ref()).to_glib_none().0,
            uri.to_glib_none().0,
            timestamp,
        );
    }
}
// Safe binding for gtk_test_accessible_assertion_message_role: forwards the
// assertion location (domain/file/line/func/expr) plus the expected and actual
// accessible roles to GTK's test reporting machinery.
#[doc(alias = "gtk_test_accessible_assertion_message_role")]
pub fn test_accessible_assertion_message_role<P: IsA<Accessible>>(
    domain: &str,
    file: &str,
    line: i32,
    func: &str,
    expr: &str,
    accessible: &P,
    expected_role: AccessibleRole,
    actual_role: AccessibleRole,
) {
    skip_assert_initialized!();
    unsafe {
        ffi::gtk_test_accessible_assertion_message_role(
            domain.to_glib_none().0,
            file.to_glib_none().0,
            line,
            func.to_glib_none().0,
            expr.to_glib_none().0,
            accessible.as_ref().to_glib_none().0,
            expected_role.to_glib(),
            actual_role.to_glib(),
        );
    }
}
//#[doc(alias = "gtk_test_accessible_check_property")]
//pub fn test_accessible_check_property<P: IsA<Accessible>>(accessible: &P, property: AccessibleProperty, : /*Unknown conversion*//*Unimplemented*/Fundamental: VarArgs) -> Option<glib::GString> {
// unsafe { TODO: call ffi:gtk_test_accessible_check_property() }
//}
//#[doc(alias = "gtk_test_accessible_check_relation")]
//pub fn test_accessible_check_relation<P: IsA<Accessible>>(accessible: &P, relation: AccessibleRelation, : /*Unknown conversion*//*Unimplemented*/Fundamental: VarArgs) -> Option<glib::GString> {
// unsafe { TODO: call ffi:gtk_test_accessible_check_relation() }
//}
//#[doc(alias = "gtk_test_accessible_check_state")]
//pub fn test_accessible_check_state<P: IsA<Accessible>>(accessible: &P, state: AccessibleState, : /*Unknown conversion*//*Unimplemented*/Fundamental: VarArgs) -> Option<glib::GString> {
// unsafe { TODO: call ffi:gtk_test_accessible_check_state() }
//}
// True if the accessible currently exposes the given property.
#[doc(alias = "gtk_test_accessible_has_property")]
pub fn test_accessible_has_property<P: IsA<Accessible>>(
    accessible: &P,
    property: AccessibleProperty,
) -> bool {
    skip_assert_initialized!();
    unsafe {
        from_glib(ffi::gtk_test_accessible_has_property(
            accessible.as_ref().to_glib_none().0,
            property.to_glib(),
        ))
    }
}

// True if the accessible currently exposes the given relation.
#[doc(alias = "gtk_test_accessible_has_relation")]
pub fn test_accessible_has_relation<P: IsA<Accessible>>(
    accessible: &P,
    relation: AccessibleRelation,
) -> bool {
    skip_assert_initialized!();
    unsafe {
        from_glib(ffi::gtk_test_accessible_has_relation(
            accessible.as_ref().to_glib_none().0,
            relation.to_glib(),
        ))
    }
}

// True if the accessible has the given role.
#[doc(alias = "gtk_test_accessible_has_role")]
pub fn test_accessible_has_role<P: IsA<Accessible>>(accessible: &P, role: AccessibleRole) -> bool {
    skip_assert_initialized!();
    unsafe {
        from_glib(ffi::gtk_test_accessible_has_role(
            accessible.as_ref().to_glib_none().0,
            role.to_glib(),
        ))
    }
}

// True if the accessible currently exposes the given state.
#[doc(alias = "gtk_test_accessible_has_state")]
pub fn test_accessible_has_state<P: IsA<Accessible>>(
    accessible: &P,
    state: AccessibleState,
) -> bool {
    skip_assert_initialized!();
    unsafe {
        from_glib(ffi::gtk_test_accessible_has_state(
            accessible.as_ref().to_glib_none().0,
            state.to_glib(),
        ))
    }
}
//#[doc(alias = "gtk_test_init")]
//pub fn test_init(argvp: /*Unimplemented*/Vec<glib::GString>, : /*Unknown conversion*//*Unimplemented*/Fundamental: VarArgs) {
// unsafe { TODO: call ffi:gtk_test_init() }
//}
//#[doc(alias = "gtk_test_list_all_types")]
//pub fn test_list_all_types() -> /*Unimplemented*/CArray TypeId { ns_id: 0, id: 30 } {
// unsafe { TODO: call ffi:gtk_test_list_all_types() }
//}
// Forces registration of all GTK GTypes (test helper).
#[doc(alias = "gtk_test_register_all_types")]
pub fn test_register_all_types() {
    assert_initialized_main_thread!();
    unsafe {
        ffi::gtk_test_register_all_types();
    }
}

// Blocks until the widget has been drawn (test helper).
#[doc(alias = "gtk_test_widget_wait_for_draw")]
pub fn test_widget_wait_for_draw<P: IsA<Widget>>(widget: &P) {
    skip_assert_initialized!();
    unsafe {
        ffi::gtk_test_widget_wait_for_draw(widget.as_ref().to_glib_none().0);
    }
}

// Builds a gdk::ContentProvider describing the row at `path` in `tree_model`
// for drag-and-drop; ownership of the provider is transferred (from_glib_full).
#[doc(alias = "gtk_tree_create_row_drag_content")]
pub fn tree_create_row_drag_content<P: IsA<TreeModel>>(
    tree_model: &P,
    path: &mut TreePath,
) -> Option<gdk::ContentProvider> {
    skip_assert_initialized!();
    unsafe {
        from_glib_full(ffi::gtk_tree_create_row_drag_content(
            tree_model.as_ref().to_glib_none().0,
            path.to_glib_none_mut().0,
        ))
    }
}

// Extracts (model, path) row drag data from a glib::Value; the model is
// borrowed (from_glib_none) while the path is transferred (from_glib_full).
// Returns None when the C call reports the value held no row drag data.
#[doc(alias = "gtk_tree_get_row_drag_data")]
pub fn tree_get_row_drag_data(
    value: &glib::Value,
) -> Option<(Option<TreeModel>, Option<TreePath>)> {
    assert_initialized_main_thread!();
    unsafe {
        let mut tree_model = ptr::null_mut();
        let mut path = ptr::null_mut();
        let ret = from_glib(ffi::gtk_tree_get_row_drag_data(
            value.to_glib_none().0,
            &mut tree_model,
            &mut path,
        ));
        if ret {
            Some((from_glib_none(tree_model), from_glib_full(path)))
        } else {
            None
        }
    }
}
| 27.32672 | 195 | 0.609129 |
712c18bcf1df1f3d6f5675035a1326b87f2b9312 | 44,029 | #[macro_use]
mod utils;
inject_indy_dependencies!();
extern crate indyrs as indy;
extern crate indyrs as api;
extern crate core;
extern crate indy_sys;
use crate::utils::{wallet, anoncreds, blob_storage, pool, ledger, did};
use crate::utils::anoncreds::{COMMON_MASTER_SECRET, CREDENTIAL1_ID};
#[cfg(any(feature = "force_full_interaction_tests", not(target_os = "android")))]
#[cfg(not(feature = "only_high_cases"))]
use crate::utils::anoncreds::{CREDENTIAL2_ID, CREDENTIAL3_ID};
use crate::utils::constants::*;
use crate::utils::Setup;
use crate::utils::domain::anoncreds::schema::Schema;
use crate::utils::domain::anoncreds::credential_definition::CredentialDefinition;
use crate::utils::domain::anoncreds::credential_offer::CredentialOffer;
use crate::utils::domain::anoncreds::credential::Credential;
use crate::utils::domain::anoncreds::credential::CredentialInfo;
use crate::utils::domain::anoncreds::revocation_registry_definition::RevocationRegistryDefinition;
use crate::utils::domain::anoncreds::proof::Proof;
use crate::utils::domain::anoncreds::revocation_state::RevocationState;
use crate::utils::domain::anoncreds::revocation_registry::RevocationRegistry;
use std::thread;
use serde_json::Value;
use core::borrow::Borrow;
use indy::{PoolHandle, WalletHandle};
// Handle to an opened pool ledger connection, shared by all test actors.
struct Pool {
    pool_handle: PoolHandle
}

// Issuer-side state: wallet, published DID, and the ids of the ledger
// artifacts it creates (schema, credential definition, revocation registry).
struct Issuer {
    issuer_wallet_handle: WalletHandle,
    issuer_wallet_config: String,
    issuer_did: String,
    schema_id: String,
    cred_def_id: String,
    rev_reg_id: String,
    // "ISSUANCE_ON_DEMAND" or "ISSUANCE_BY_DEFAULT", parsed from the
    // revocation registry config; decides whether deltas are posted on issue.
    issuance_type: String,
    tails_writer_config: String
}

// Prover-side state: wallet, DID, master secret id, plus per-credential data
// filled in after make_credential_request (cred_def_id, request metadata).
struct Prover {
    wallet_handle: WalletHandle,
    wallet_config: String,
    did: String,
    verkey: String,
    master_secret_id: String,
    cred_def_id: Option<String>,
    cred_req_metadata_json: Option<String>
}

// Verifier-side state: just the proof request it will verify proofs against.
struct Verifier {
    proof_request: String
}
impl Pool {
    /// Opens (creating if needed) the pool ledger with the given name.
    pub fn new(pool_name: &str) -> Pool {
        Pool { pool_handle: pool::create_and_open_pool_ledger(pool_name).unwrap() }
    }

    /// Closes the pool connection, ignoring any close error.
    pub fn close(self) {
        let _ = pool::close(self.pool_handle);
    }

    /// Publishes a NYM for `prover_did`, signed by the issuer.
    pub fn submit_nym(&self, issuer_did: &str, issuer_wallet_handle: WalletHandle, prover_did: &str, prover_verkey: Option<&str>)
    {
        let nym_request = ledger::build_nym_request(issuer_did, prover_did, prover_verkey, None, None).unwrap();
        ledger::sign_and_submit_request(self.pool_handle, issuer_wallet_handle, &issuer_did, &nym_request).unwrap();
    }

    /// Posts a schema to the ledger; returns the raw submit response.
    pub fn submit_schema(&self, issuer_did: &str, issuer_wallet_handle: WalletHandle, schema_json: &str) -> String {
        let schema_request = ledger::build_schema_request(issuer_did, schema_json).unwrap();
        ledger::sign_and_submit_request(self.pool_handle, issuer_wallet_handle, issuer_did, &schema_request).unwrap()
    }

    /// Fetches and parses a schema; returns (schema_id, schema_json).
    pub fn get_schema(&self, did: Option<&str>, schema_id: &str) -> (String, String) {
        let get_schema_request = ledger::build_get_schema_request(did, schema_id).unwrap();
        let get_schema_response = ledger::submit_request(self.pool_handle, &get_schema_request).unwrap();
        ledger::parse_get_schema_response(&get_schema_response).unwrap()
    }

    /// Posts a credential definition to the ledger; returns the raw response.
    pub fn submit_cred_def(&self, issuer_did: &str, issuer_wallet_handle: WalletHandle, cred_def_json: &str) -> String {
        cred_def_request = ledger::build_cred_def_txn(issuer_did, cred_def_json).unwrap();
        ledger::sign_and_submit_request(self.pool_handle, issuer_wallet_handle, issuer_did, &cred_def_request).unwrap()
    }

    /// Fetches and parses a credential definition from the ledger.
    pub fn get_cred_def(&self, did: Option<&str>, cred_def_id: &str) -> (String, String) /* (cred_def_id, cred_def_json) */ {
        let get_cred_def_request = ledger::build_get_cred_def_request(did, cred_def_id).unwrap();
        let get_cred_def_response = ledger::submit_request(self.pool_handle, &get_cred_def_request).unwrap();
        ledger::parse_get_cred_def_response(&get_cred_def_response).unwrap()
    }

    /// Posts a revocation registry definition; returns the raw response.
    pub fn submit_revoc_reg_def(&self, issuer_did: &str, issuer_wallet_handle: WalletHandle, rev_reg_def_json: &str) -> String {
        let rev_reg_def_request = ledger::build_revoc_reg_def_request(issuer_did, rev_reg_def_json).unwrap();
        ledger::sign_and_submit_request(self.pool_handle, issuer_wallet_handle, issuer_did, &rev_reg_def_request).unwrap()
    }

    /// Fetches and parses a revocation registry definition.
    pub fn get_revoc_reg_def(&self, did: Option<&str>, revoc_reg_def_id: &str) -> (String, String) /* revoc_reg_def_id, revo_reg_def_json */ {
        let get_rev_reg_def_request = ledger::build_get_revoc_reg_def_request(did, &revoc_reg_def_id).unwrap();
        let get_rev_reg_def_response = ledger::submit_request(self.pool_handle, &get_rev_reg_def_request).unwrap();
        ledger::parse_get_revoc_reg_def_response(&get_rev_reg_def_response).unwrap()
    }

    /// Posts a revocation registry entry (delta) for `rev_reg_id`.
    pub fn submit_revoc_reg_entry(&self, issuer_did: &str, issuer_wallet_handle: WalletHandle, rev_reg_id: &str, rev_reg_entry_json: &str) -> String {
        let rev_reg_entry_request =
            ledger::build_revoc_reg_entry_request(issuer_did, rev_reg_id, REVOC_REG_TYPE, rev_reg_entry_json).unwrap();
        ledger::sign_and_submit_request(self.pool_handle, issuer_wallet_handle, issuer_did, &rev_reg_entry_request).unwrap()
    }

    /// Fetches the revocation registry delta over [`from`, `to`];
    /// returns (rev_reg_id, delta_json, timestamp).
    pub fn get_revoc_reg_delta(&self, did: Option<&str>, revoc_reg_def_id: &str, from: Option<u64>, to: u64) -> (String, String, u64) /* rev_reg_id, revoc_reg_delta_json, timestamp */ {
        let get_rev_reg_delta_request = ledger::build_get_revoc_reg_delta_request(did, revoc_reg_def_id, from, to).unwrap();
        let get_rev_reg_delta_response = ledger::submit_request(self.pool_handle, &get_rev_reg_delta_request).unwrap();
        ledger::parse_get_revoc_reg_delta_response(&get_rev_reg_delta_response).unwrap()
    }
}
impl Issuer {
    /// Creates an issuer with a fresh wallet and a DID published via a trustee.
    pub fn new(pool: &Pool) -> Issuer {
        let (wallet_handle, wallet_config) = wallet::create_and_open_default_wallet(&format!("wallet_for_pool_{}", pool.pool_handle)).unwrap();

        Issuer {
            // Issuer creates wallet, gets wallet handle
            issuer_wallet_handle: wallet_handle,
            issuer_wallet_config: wallet_config,
            // Issuer create DID
            issuer_did: did::create_store_and_publish_my_did_from_trustee(wallet_handle, pool.pool_handle).unwrap().0,
            schema_id: String::new(),
            rev_reg_id: String::new(),
            cred_def_id: String::new(),
            issuance_type: String::new(),
            tails_writer_config: anoncreds::tails_writer_config()
        }
    }

    /// Publishes the Schema, CredentialDefinition and RevocationRegistry to
    /// the ledger, recording their ids (and the issuance type from the
    /// revocation registry config) on `self`.
    pub fn create_initial_ledger_state(&mut self, pool: &Pool, revoc_registry_config: &str)
    {
        let revoc_reg_config_value: Value = serde_json::from_str(revoc_registry_config).unwrap();
        self.issuance_type = String::from(revoc_reg_config_value.as_object().unwrap().get("issuance_type").unwrap().as_str().unwrap());

        // Issuer creates Schema
        let (schema_id, schema_json) = anoncreds::issuer_create_schema(&self.issuer_did,
                                                                       GVT_SCHEMA_NAME,
                                                                       SCHEMA_VERSION,
                                                                       GVT_SCHEMA_ATTRIBUTES).unwrap();

        // !!IMPORTANT!!
        // It is important Post and Get Schema from Ledger and parse it to get the correct Schema JSON and correspondent it seq_no in Ledger
        // After that we can create CredentialDefinition for received Schema(not for result of indy_issuer_create_schema)

        let _schema_response = pool.submit_schema(&self.issuer_did, self.issuer_wallet_handle, &schema_json);

        // Give the ledger a moment to commit the schema before reading it back.
        ::std::thread::sleep(::std::time::Duration::from_secs(1));

        // Issuer gets Schema from Ledger
        let (_, schema_json) = pool.get_schema(Some(&self.issuer_did), &schema_id);

        self.schema_id = schema_id;

        // Issuer creates CredentialDefinition
        let (cred_def_id, cred_def_json) = anoncreds::issuer_create_credential_definition(self.issuer_wallet_handle,
                                                                                          &self.issuer_did,
                                                                                          &schema_json,
                                                                                          TAG_1,
                                                                                          None,
                                                                                          Some(&anoncreds::revocation_cred_def_config())).unwrap();

        // Issuer post CredentialDefinition to Ledger
        pool.submit_cred_def(&self.issuer_did, self.issuer_wallet_handle, &cred_def_json);

        self.cred_def_id = cred_def_id;

        // Issuer creates RevocationRegistry
        let tails_writer_handle = blob_storage::open_writer("default", &self.tails_writer_config).unwrap();

        let (rev_reg_id, rev_reg_def_json, rev_reg_entry_json) =
            anoncreds::issuer_create_and_store_revoc_reg(self.issuer_wallet_handle,
                                                         &self.issuer_did,
                                                         None,
                                                         TAG_1,
                                                         &self.cred_def_id,
                                                         revoc_registry_config,
                                                         tails_writer_handle).unwrap();

        // Issuer posts RevocationRegistryDefinition to Ledger
        pool.submit_revoc_reg_def(&self.issuer_did, self.issuer_wallet_handle, &rev_reg_def_json);

        self.rev_reg_id = rev_reg_id;

        // Issuer posts RevocationRegistryEntry to Ledger
        pool.submit_revoc_reg_entry(&self.issuer_did, self.issuer_wallet_handle, &self.rev_reg_id, &rev_reg_entry_json);
    }

    /// Creates a credential offer for the issuer's credential definition.
    pub fn make_credential_offer(&self) -> String
    {
        anoncreds::issuer_create_credential_offer(self.issuer_wallet_handle, &self.cred_def_id).unwrap()
    }

    /// Issues a credential for the given offer/request/values. Returns
    /// (cred_json, cred_rev_id, revoc_reg_delta_json); the delta is `None`
    /// when the registry uses ISSUANCE_BY_DEFAULT.
    pub fn issue_credential(&self, pool: &Pool, cred_offer_json: &str, cred_req_json: &str, cred_values_json: &str) -> (String, String, Option<String>)
    {
        // Issuer creates TailsReader
        let blob_storage_reader_handle = blob_storage::open_reader(TYPE, &self.tails_writer_config).unwrap();

        // Issuer creates Credential
        // Note that the function returns revoc_reg_delta_json as None in case
        // the revocation registry was created with the strategy ISSUANCE_BY_DEFAULT
        let (cred_json, cred_rev_id, revoc_reg_delta_json) = anoncreds::issuer_create_credential(self.issuer_wallet_handle,
                                                                                                 cred_offer_json,
                                                                                                 cred_req_json,
                                                                                                 cred_values_json,
                                                                                                 Some(&self.rev_reg_id),
                                                                                                 Some(blob_storage_reader_handle)).unwrap();

        // Issuer does not have to post rev_reg_delta to ledger in case of the strategy ISSUANCE_BY_DEFAULT.
        // Borrow the delta instead of cloning the whole String just to submit it.
        if self.issuance_type == "ISSUANCE_ON_DEMAND" {
            pool.submit_revoc_reg_entry(&self.issuer_did, self.issuer_wallet_handle, &self.rev_reg_id, revoc_reg_delta_json.as_ref().unwrap());
        }

        (cred_json, cred_rev_id.unwrap(), revoc_reg_delta_json)
    }

    /// Revokes the credential identified by `cred_rev_id`, posts the resulting
    /// delta to the ledger, and returns that delta JSON.
    pub fn revoke_credential(&self, pool: &Pool, cred_rev_id: &str) -> String
    {
        // Issuer creates TailsReader
        let blob_storage_reader_handle = blob_storage::open_reader(TYPE, &self.tails_writer_config).unwrap();

        // Issuer revokes cred_info
        let rev_reg_delta_json = anoncreds::issuer_revoke_credential(self.issuer_wallet_handle, blob_storage_reader_handle, &self.rev_reg_id, cred_rev_id).unwrap();

        // Issuer post RevocationRegistryDelta to Ledger
        pool.submit_revoc_reg_entry(&self.issuer_did, self.issuer_wallet_handle, &self.rev_reg_id, &rev_reg_delta_json);

        rev_reg_delta_json
    }

    /// Closes and deletes the issuer wallet.
    pub fn close(&self)
    {
        wallet::close_and_delete_wallet(self.issuer_wallet_handle, &self.issuer_wallet_config).unwrap();
    }
}
impl Prover
{
    /// Creates a prover with a fresh wallet, a new DID, and a master secret
    /// (defaults to `COMMON_MASTER_SECRET` when `master_secret_id` is None).
    pub fn new(master_secret_id: Option<&str>) -> Prover
    {
        // Prover creates wallet, gets wallet handle
        let (prover_wallet_handle, prover_wallet_config) = wallet::create_and_open_default_wallet("interactions_prover").unwrap();

        // Prover create DID
        let (prover_did, prover_verkey) = did::create_my_did(prover_wallet_handle, "{}").unwrap();

        // Prover creates Master Secret
        let master_secret_id = master_secret_id.unwrap_or(COMMON_MASTER_SECRET);
        anoncreds::prover_create_master_secret(prover_wallet_handle, master_secret_id).unwrap();

        Prover {
            wallet_handle: prover_wallet_handle,
            wallet_config: prover_wallet_config,
            did: prover_did,
            verkey: prover_verkey,
            master_secret_id: String::from(master_secret_id),
            cred_def_id: None,
            cred_req_metadata_json: None
        }
    }

    /// Builds a credential request for the given offer, fetching the
    /// credential definition from the ledger. Stores the cred_def_id and
    /// request metadata on `self` for the later `store_credentials` call.
    pub fn make_credential_request(&mut self, pool: &Pool, cred_offer_json: &str) -> String
    {
        // Prover gets CredentialDefinition from Ledger
        let cred_offer: CredentialOffer = serde_json::from_str(cred_offer_json).unwrap();
        let (cred_def_id, cred_def_json) = pool.get_cred_def(Some(&self.did), &cred_offer.cred_def_id.0);
        self.cred_def_id = Some(cred_def_id);

        // Prover creates Credential Request
        let (cred_req_json, cred_req_metadata_json) = anoncreds::prover_create_credential_req(self.wallet_handle,
                                                                                              &self.did,
                                                                                              cred_offer_json,
                                                                                              &cred_def_json,
                                                                                              &self.master_secret_id).unwrap();
        self.cred_req_metadata_json = Some(cred_req_metadata_json);
        cred_req_json
    }

    /// Stores an issued credential in the wallet under `cred_id`, fetching the
    /// credential definition and revocation registry definition from the
    /// ledger. Requires `make_credential_request` to have run first.
    pub fn store_credentials(&self, pool: &Pool, cred_json: &str, cred_id: &str)
    {
        let credential: Credential = serde_json::from_str(cred_json).unwrap();

        // Prover gets CredentialDefinition from Ledger (borrow the stored id
        // instead of cloning it)
        let (_, cred_def_json) = pool.get_cred_def(Some(&self.did), self.cred_def_id.as_ref().unwrap());

        // Prover gets RevocationRegistryDefinition
        let (_, revoc_reg_def_json) = pool.get_revoc_reg_def(None, &credential.rev_reg_id.unwrap().0);

        // Prover stores received Credential
        anoncreds::prover_store_credential(self.wallet_handle,
                                           cred_id,
                                           &self.cred_req_metadata_json.clone().unwrap(),
                                           cred_json,
                                           &cred_def_json,
                                           Some(&revoc_reg_def_json)).unwrap();
    }

    /// Builds a proof for `proof_request` from the first credential matching
    /// `attr1_referent`, fetching schema/cred-def/rev-reg artifacts from the
    /// ledger and computing a revocation state over the delta in [from, to].
    pub fn make_proof(&self, pool: &Pool, proof_request: &str, attr1_referent: &str, from: Option<u64>, to: u64) -> String
    {
        // Prover searches Credentials for Proof Request
        let search_handle = anoncreds::prover_search_credentials_for_proof_req(self.wallet_handle, proof_request, None).unwrap();
        let credentials_list = anoncreds::prover_fetch_next_credentials_for_proof_req(search_handle, attr1_referent, 1).unwrap();

        let credentials_list_value: Value = serde_json::from_str(&credentials_list).unwrap();

        // extract first result of the search as Value
        let credentials_first = &credentials_list_value.as_array().unwrap()[0];

        // extract cred_info as Value from the result
        let cred_info_value = credentials_first.as_object().unwrap().get("cred_info").unwrap();

        let cred_info: CredentialInfo = serde_json::from_value(cred_info_value.clone()).unwrap();

        let _ = anoncreds::prover_close_credentials_search_for_proof_req(search_handle).unwrap();

        let schema_id = cred_info.schema_id;
        let cred_def_id = cred_info.cred_def_id;
        let cred_rev_id = cred_info.cred_rev_id.clone().unwrap();
        let rev_reg_id = cred_info.rev_reg_id.clone().unwrap();

        // Prover gets Schema from Ledger
        let (_, schema_json) = pool.get_schema(None, &schema_id.0);

        // Prover gets CredentialDefinition from Ledger
        let (_, cred_def_json) = pool.get_cred_def(Some(&self.did), &cred_def_id.0);

        // Prover gets RevocationRegistryDefinition
        let (_, revoc_reg_def_json) = pool.get_revoc_reg_def(None, &rev_reg_id.0);

        // Prover gets RevocationRegistryDelta from Ledger
        let (_, revoc_reg_delta_json, timestamp) = pool.get_revoc_reg_delta(None, &rev_reg_id.0, from, to);

        // Prover creates RevocationState
        let prover_blob_storage_reader_handle = blob_storage::open_reader(TYPE, &anoncreds::tails_writer_config()).unwrap();

        let rev_state_json = anoncreds::create_revocation_state(prover_blob_storage_reader_handle,
                                                                &revoc_reg_def_json,
                                                                &revoc_reg_delta_json,
                                                                timestamp,
                                                                &cred_rev_id).unwrap();

        let proof_request_value: Value = serde_json::from_str(proof_request).unwrap();
        let requested_predicates = !proof_request_value.as_object().unwrap().get("requested_predicates").unwrap().as_object().unwrap().is_empty();

        // Prover creates Proof.
        // FIX: the no-predicates branch previously hard-coded the JSON key
        // "attr1_referent" instead of using the `attr1_referent` parameter,
        // unlike the predicates branch; both branches now use the parameter.
        let requested_credentials_json = if requested_predicates
        {
            json!({
                 "self_attested_attributes": json!({}),
                 "requested_attributes": json!({
                    attr1_referent: json!({ "cred_id": cred_info.referent, "timestamp": timestamp, "revealed":true })
                 }),
                 "requested_predicates": json!({
                    "predicate1_referent": json!({ "cred_id": cred_info.referent, "timestamp": timestamp })
                 })
            }).to_string()
        } else {
            json!({
                 "self_attested_attributes": json!({}),
                 "requested_attributes": json!({
                    attr1_referent: json!({ "cred_id": cred_info.referent, "timestamp": timestamp, "revealed":true })
                 }),
                 "requested_predicates": json!({})
            }).to_string()
        };

        let schemas_json = json!({
            schema_id.0: serde_json::from_str::<Schema>(&schema_json).unwrap()
        }).to_string();

        let cred_defs_json = json!({
            cred_def_id.0: serde_json::from_str::<CredentialDefinition>(&cred_def_json).unwrap()
        }).to_string();

        let rev_states_json = json!({
            rev_reg_id.0: json!({
                timestamp.to_string(): serde_json::from_str::<RevocationState>(&rev_state_json).unwrap()
            })
        }).to_string();

        let proof_json = anoncreds::prover_create_proof(self.wallet_handle,
                                                        proof_request,
                                                        &requested_credentials_json,
                                                        &self.master_secret_id,
                                                        &schemas_json,
                                                        &cred_defs_json,
                                                        &rev_states_json).unwrap();
        proof_json
    }

    /// Closes and deletes the prover wallet.
    pub fn close(&self)
    {
        wallet::close_and_delete_wallet(self.wallet_handle, &self.wallet_config).unwrap();
    }
}
impl Verifier {
    /// Creates a verifier bound to the given proof request.
    pub fn new(proof_request: &String) -> Verifier {
        Verifier {
            proof_request: proof_request.clone()
        }
    }

    /// Asserts that the revealed attribute `attr_name` in the proof equals
    /// `attr_value`.
    pub fn verify_revealed(&self, proof_json: &str, attr_name: &str, attr_value: &str)
    {
        let proof: Proof = serde_json::from_str(&proof_json).unwrap();

        assert_eq!(attr_value, proof.requested_proof.revealed_attrs.get(attr_name).unwrap().raw)
    }

    /// Cryptographically verifies the proof: fetches the schema, credential
    /// definition, revocation registry definition and registry state from the
    /// ledger for the proof's single identifier, then delegates to
    /// anoncreds::verifier_verify_proof.
    pub fn verify(&self, pool: &Pool, proof_json: &str) -> bool
    {
        let proof: Proof = serde_json::from_str(&proof_json).unwrap();
        // This test flow only ever produces single-credential proofs.
        assert_eq!(1, proof.identifiers.len());
        let identifier = proof.identifiers[0].clone();

        // Verifier gets Schema from Ledger
        let (_, schema_json) = pool.get_schema(Some(DID_MY1), &identifier.schema_id.0);

        // Verifier gets CredentialDefinition from Ledger
        let (_, cred_def_json) = pool.get_cred_def(Some(DID_MY1), &identifier.cred_def_id.0);

        // Verifier gets RevocationRegistryDefinition from Ledger
        let (_, revoc_reg_def_json) = pool.get_revoc_reg_def(Some(DID_MY1), &identifier.rev_reg_id.clone().unwrap().0);

        // Verifier gets RevocationRegistry from Ledger
        let (_, rev_reg_json, timestamp) =
            pool.get_revoc_reg_delta(Some(DID_MY1), &identifier.rev_reg_id.clone().unwrap().0, None, identifier.timestamp.unwrap());

        let schemas_json = json!({
            identifier.schema_id.0.clone(): serde_json::from_str::<Schema>(&schema_json).unwrap()
        }).to_string();

        let cred_defs_json = json!({
            identifier.cred_def_id.0.clone(): serde_json::from_str::<CredentialDefinition>(&cred_def_json).unwrap()
        }).to_string();

        let rev_reg_defs_json = json!({
            identifier.rev_reg_id.clone().unwrap().0.clone(): serde_json::from_str::<RevocationRegistryDefinition>(&revoc_reg_def_json).unwrap()
        }).to_string();

        let rev_regs_json = json!({
            identifier.rev_reg_id.clone().unwrap().0.clone(): json!({
                timestamp.to_string(): serde_json::from_str::<RevocationRegistry>(&rev_reg_json).unwrap()
            })
        }).to_string();

        let valid = anoncreds::verifier_verify_proof(&self.proof_request,
                                                     proof_json,
                                                     &schemas_json,
                                                     &cred_defs_json,
                                                     &rev_reg_defs_json,
                                                     &rev_regs_json).unwrap();
        valid
    }
}
// Runs the full issuer/prover/verifier flow with a registry that publishes a
// delta on every issuance (ISSUANCE_ON_DEMAND).
#[cfg(feature = "revocation_tests")]
#[test]
fn anoncreds_revocation_interaction_test_issuance_by_demand() {
    anoncreds_revocation_interaction_test_one_prover(r#"{"max_cred_num":5, "issuance_type":"ISSUANCE_ON_DEMAND"}"#);
}

// Same flow with ISSUANCE_BY_DEFAULT, where issuance produces no delta to
// post; gated off for Android unless full interaction tests are forced.
#[cfg(feature = "revocation_tests")]
#[cfg(any(feature = "force_full_interaction_tests", not(target_os = "android")))]
#[cfg(not(feature = "only_high_cases"))]
#[test]
fn anoncreds_revocation_interaction_test_issuance_by_default()
{
    anoncreds_revocation_interaction_test_one_prover(r#"{"max_cred_num":5, "issuance_type":"ISSUANCE_BY_DEFAULT"}"#);
}
/// Shared driver for the two tests above: runs the complete revocation
/// lifecycle for a single prover against the given revocation-registry
/// configuration.
///
/// Flow: publish ledger state -> issue a revocable credential -> prove it
/// (must verify) -> revoke it -> prove again (must fail verification).
fn anoncreds_revocation_interaction_test_one_prover(revocation_registry_config: &str)
{
    let setup = Setup::empty();
    let pool = Pool::new(&setup.name);
    let mut issuer = Issuer::new(&pool);
    let mut prover = Prover::new(None);
    // Issuer publish Prover DID
    pool.submit_nym(&issuer.issuer_did, issuer.issuer_wallet_handle, &prover.did, Some(&prover.verkey));
    // ISSUER post to Ledger Schema, CredentialDefinition, RevocationRegistry
    issuer.create_initial_ledger_state(&pool, revocation_registry_config);
    ///////////////////////////////////////////////////////////////////////////////////////////////////////
    // Issuance Credential for Prover
    // Issuer creates Credential Offer
    let cred_offer_json = issuer.make_credential_offer();
    // Prover makes credential request
    let cred_req_json = prover.make_credential_request(&pool, &cred_offer_json);
    // Issuer issues credential (the returned registry delta is unused here)
    let (cred_json, cred_rev_id, _revoc_reg_delta_json) = issuer.issue_credential(&pool, &cred_offer_json, &cred_req_json, &anoncreds::gvt_credential_values_json());
    // Prover stores credentials
    prover.store_credentials(&pool, &cred_json, CREDENTIAL1_ID);
    // Basic check: exactly one credential for the GVT schema is in the wallet
    let credentials = anoncreds::prover_get_credentials(prover.wallet_handle, &json!({"schema_name": GVT_SCHEMA_NAME}).to_string()).unwrap();
    let credentials: Vec<serde_json::Value> = serde_json::from_str(&credentials).unwrap();
    assert_eq!(credentials.len(), 1);
    /////////////////////////////////////////////////////////////////////////////////////////////////
    // Verifying Prover's Credential
    // Sleep so `to` is strictly later than the ledger registry entry timestamp.
    thread::sleep(std::time::Duration::from_secs(1));
    let to = time::get_time().sec as u64;
    let proof_request = json!({
        "nonce":"123432421212",
        "name":"proof_req_1",
        "version":"0.1",
        "requested_attributes": json!({
            "attr1_referent": json!({
                "name":"name"
            })
        }),
        "requested_predicates": json!({
            "predicate1_referent": json!({ "name":"age", "p_type":">=", "p_value":18 })
        }),
        "non_revoked": json!({ "to": to.clone() })
    }).to_string();
    let verifier = Verifier::new(&proof_request);
    let proof_json = prover.make_proof(&pool, &proof_request, "attr1_referent", None, to);
    // Verifier verifies revealed attribute
    verifier.verify_revealed(&proof_json, "attr1_referent", "Alex");
    let valid = verifier.verify(&pool, &proof_json);
    assert!(valid);
    /////////////////////////////////////////////////////////////////////////////////////////
    // Issuer revokes cred_rev_id
    let _rev_reg_delta_json = issuer.revoke_credential(&pool, &cred_rev_id);
    // Verifying Prover Credential after Revocation
    thread::sleep(std::time::Duration::from_secs(1));
    // Non-revocation window (from, to] now contains the revocation, so the
    // proof must fail to verify.
    let from = to;
    let to = time::get_time().sec as u64;
    let proof_json = prover.make_proof(&pool, &proof_request, "attr1_referent", Some(from), to);
    let valid = verifier.verify(&pool, &proof_json);
    assert!(!valid);
    issuer.close();
    prover.close();
    pool.close();
}
/// Runs the three-step issuance dance (offer -> request -> issue) and stores
/// the resulting revocable credential in the prover's wallet under `cred_id`.
/// Returns the credential revocation id and the optional registry delta
/// produced by issuance.
#[cfg(not(feature = "only_high_cases"))]
fn multi_steps_create_revocation_credential(pool: &Pool, issuer: &Issuer, prover: &mut Prover, cred_values_json: &str, cred_id: &str) -> (String, Option<String>)
{
    // Offer from the issuer, answered by a request from the prover.
    let offer = issuer.make_credential_offer();
    let request = prover.make_credential_request(&pool, &offer);
    // Issuer signs a credential over the supplied values.
    let (credential, rev_id, delta) = issuer.issue_credential(&pool, &offer, &request, cred_values_json);
    // Prover keeps the credential under the given wallet id.
    prover.store_credentials(&pool, &credential, cred_id);
    (rev_id, delta)
}
#[cfg(feature = "revocation_tests")]
#[cfg(any(feature = "force_full_interaction_tests", not(target_os = "android")))]
#[cfg(not(feature = "only_high_cases"))]
#[test]
fn anoncreds_revocation_interaction_test_issuance_by_demand_three_credentials_post_entry_three_times_proving_first() {
let setup = Setup::empty();
let pool = Pool::new(&setup.name);
let mut issuer = Issuer::new(&pool);
let mut prover1 = Prover::new(Some("prover1_master_secret"));
let mut prover2 = Prover::new(Some("prover2_master_secret"));
let mut prover3 = Prover::new(Some("prover3_master_secret"));
// ISSUER post to Ledger Schema, CredentialDefinition, RevocationRegistry
issuer.create_initial_ledger_state(&pool, r#"{"max_cred_num":5, "issuance_type":"ISSUANCE_ON_DEMAND"}"#);
/*ISSUANCE CREDENTIAL FOR PROVER1*/
let (_prover1_cred_rev_id, _prover1_revoc_reg_delta1_json) =
multi_steps_create_revocation_credential(&pool, &issuer, &mut prover1, &anoncreds::gvt_credential_values_json(), CREDENTIAL1_ID);
/*ISSUANCE CREDENTIAL FOR PROVER2*/
let (_prover2_cred_rev_id, _prover2_revoc_reg_delta1_json) =
multi_steps_create_revocation_credential(&pool, &issuer, &mut prover2, &anoncreds::gvt2_credential_values_json(), CREDENTIAL2_ID);
/*ISSUANCE CREDENTIAL FOR PROVER3*/
let (_prover3_cred_rev_id, _prover3_revoc_reg_delta1_json) =
multi_steps_create_revocation_credential(&pool, &issuer, &mut prover3, &anoncreds::gvt3_credential_values_json(), CREDENTIAL3_ID);
// Verifying Prover1 Credential
thread::sleep(std::time::Duration::from_secs(1));
let to = time::get_time().sec as u64;
let proof_request = json!({
"nonce":"123432421212",
"name":"proof_req_1",
"version":"0.1",
"requested_attributes": json!({
"attr1_referent": json!({
"name":"name"
})
}),
"requested_predicates": json!({}),
"non_revoked": json!({ "to": to.clone() })
}).to_string();
let verifier = Verifier::new(&proof_request);
let proof_json = prover1.make_proof(&pool, &proof_request, "attr1_referent", None, to);
// Verifier verifies revealed attribute
verifier.verify_revealed(&proof_json, "attr1_referent", "Alex");
let valid = verifier.verify(&pool, &proof_json);
assert!(valid);
issuer.close();
prover1.close();
prover2.close();
prover3.close();
pool.close();
}
#[cfg(feature = "revocation_tests")]
#[cfg(any(feature = "force_full_interaction_tests", not(target_os = "android")))]
#[cfg(not(feature = "only_high_cases"))]
#[test]
fn anoncreds_revocation_interaction_test_issuance_by_demand_three_credentials_post_common_entry_proving_all() {
    let setup = Setup::empty();
    let pool = Pool::new(&setup.name);
    let mut issuer = Issuer::new(&pool);
    let mut prover1 = Prover::new(Some("prover1_master_secret"));
    let mut prover2 = Prover::new(Some("prover2_master_secret"));
    let mut prover3 = Prover::new(Some("prover3_master_secret"));
    // ISSUER post to Ledger Schema, CredentialDefinition, RevocationRegistry
    issuer.create_initial_ledger_state(&pool, r#"{"max_cred_num":5, "issuance_type":"ISSUANCE_ON_DEMAND"}"#);
    /*ISSUANCE CREDENTIAL FOR PROVER1*/
    let (_prover1_cred_rev_id, revoc_reg_delta1_json) = multi_steps_create_revocation_credential(&pool, &issuer, &mut prover1, &anoncreds::gvt_credential_values_json(), CREDENTIAL1_ID);
    let revoc_reg_delta1_json = revoc_reg_delta1_json.unwrap();
    /*ISSUANCE CREDENTIAL FOR PROVER2*/
    let (_prover2_cred_rev_id, revoc_reg_delta2_json) = multi_steps_create_revocation_credential(&pool, &issuer, &mut prover2, &anoncreds::gvt2_credential_values_json(), CREDENTIAL2_ID);
    let revoc_reg_delta2_json = revoc_reg_delta2_json.unwrap();
    // Issuer merge Revocation Registry Deltas
    let revoc_reg_delta_json = anoncreds::issuer_merge_revocation_registry_deltas(&revoc_reg_delta1_json, &revoc_reg_delta2_json).unwrap();
    /*ISSUANCE CREDENTIAL FOR PROVER3*/
    let (_prover3_cred_rev_id, revoc_reg_delta3_json) = multi_steps_create_revocation_credential(&pool, &issuer, &mut prover3, &anoncreds::gvt3_credential_values_json(), CREDENTIAL3_ID);
    let revoc_reg_delta3_json = revoc_reg_delta3_json.unwrap();
    // Issuer merge Revocation Registry Deltas (result unused for now, see TODO below)
    let _revoc_reg_delta_json = anoncreds::issuer_merge_revocation_registry_deltas(&revoc_reg_delta_json, &revoc_reg_delta3_json).unwrap();
    // TODO: test if the issuer can submit one delta instead of multiple deltas consequently
    // let rev_reg_entry_request =
    //     ledger::build_revoc_reg_entry_request(&issuer_did, &rev_reg_id, REVOC_REG_TYPE, &revoc_reg_delta_json).unwrap();
    // ledger::sign_and_submit_request(pool_handle, issuer_wallet_handle, &issuer_did, &rev_reg_entry_request).unwrap();
    // Verifying Prover1 Credential
    thread::sleep(std::time::Duration::from_secs(1));
    let to = time::get_time().sec as u64;
    let proof_request = json!({
        "nonce":"123432421212",
        "name":"proof_req_1",
        "version":"0.1",
        "requested_attributes": json!({
            "attr1_referent": json!({
                "name":"name"
            })
        }),
        "requested_predicates": json!({}),
        "non_revoked": json!({ "to": to.clone() })
    }).to_string();
    let verifier = Verifier::new(&proof_request);
    let proof_json = prover1.make_proof(&pool, &proof_request, "attr1_referent", None, to);
    verifier.verify_revealed(&proof_json, "attr1_referent", "Alex");
    let valid = verifier.verify(&pool, &proof_json);
    assert!(valid);
    // Verifying Prover2 Credential
    let proof_json = prover2.make_proof(&pool, &proof_request, "attr1_referent", None, to);
    // Verifier verifies proof from Prover2
    verifier.verify_revealed(&proof_json, "attr1_referent", "Alexander");
    let valid = verifier.verify(&pool, &proof_json);
    assert!(valid);
    // Verifying Prover3 Credential
    let proof_json = prover3.make_proof(&pool, &proof_request, "attr1_referent", None, to);
    // Verifier verifies proof from Prover3
    verifier.verify_revealed(&proof_json, "attr1_referent", "Artem");
    let valid = verifier.verify(&pool, &proof_json);
    assert!(valid);
    issuer.close();
    prover1.close();
    prover2.close();
    prover3.close();
    pool.close();
}
#[cfg(feature = "revocation_tests")]
#[test]
fn anoncreds_revocation_interaction_test_issuance_by_demand_fully_qualified_did() {
    let setup = Setup::empty();
    let pool = Pool::new(&setup.name);
    let (wallet_handle, wallet_config) = wallet::create_and_open_default_wallet(format!("wallet_for_pool_{}", pool.pool_handle).borrow()).unwrap();
    // Issuer is constructed by hand (instead of Issuer::new) so it can use a
    // fully-qualified DID published via the v1 trustee helper.
    let mut issuer = Issuer {
        // Issuer creates wallet, gets wallet handle
        issuer_wallet_handle: wallet_handle,
        issuer_wallet_config: wallet_config,
        // Issuer create DID
        issuer_did: did::create_store_and_publish_my_did_from_trustee_v1(wallet_handle, pool.pool_handle).unwrap().0,
        schema_id: String::new(),
        rev_reg_id: String::new(),
        cred_def_id: String::new(),
        issuance_type: String::new(),
        tails_writer_config: anoncreds::tails_writer_config(),
    };
    // Prover creates wallet, gets wallet handle
    let (prover_wallet_handle, prover_wallet_config) = wallet::create_and_open_default_wallet("interactions_prover").unwrap();
    // Prover create DID — fully qualified via method_name "sov"
    let my_did_json = json!({"method_name": "sov"}).to_string();
    let (prover_did, prover_verkey) = did::create_my_did(prover_wallet_handle, &my_did_json).unwrap();
    // Prover creates Master Secret
    let master_secret_id = COMMON_MASTER_SECRET;
    anoncreds::prover_create_master_secret(prover_wallet_handle, COMMON_MASTER_SECRET).unwrap();
    let mut prover = Prover {
        wallet_handle: prover_wallet_handle,
        wallet_config: prover_wallet_config,
        did: prover_did.clone(),
        verkey: prover_verkey.clone(),
        master_secret_id: String::from(master_secret_id),
        cred_def_id: None,
        cred_req_metadata_json: None,
    };
    // Issuer publish Prover DID
    pool.submit_nym(&issuer.issuer_did, issuer.issuer_wallet_handle, &prover.did, Some(&prover.verkey));
    // ISSUER post to Ledger Schema, CredentialDefinition, RevocationRegistry
    issuer.create_initial_ledger_state(&pool, r#"{"max_cred_num":5, "issuance_type":"ISSUANCE_ON_DEMAND"}"#);
    ///////////////////////////////////////////////////////////////////////////////////////////////////////
    // Issuance Credential for Prover
    // Issuer creates Credential Offer
    let cred_offer_json = issuer.make_credential_offer();
    // Prover makes credential request
    let cred_req_json = prover.make_credential_request(&pool, &cred_offer_json);
    // Issuer issues credential
    let (cred_json, _, _) = issuer.issue_credential(&pool, &cred_offer_json, &cred_req_json, &anoncreds::gvt_credential_values_json());
    // Prover stores credentials
    prover.store_credentials(&pool, &cred_json, CREDENTIAL1_ID);
    // Basic check
    let credentials = anoncreds::prover_get_credentials(prover.wallet_handle, &json!({"schema_name": GVT_SCHEMA_NAME}).to_string()).unwrap();
    let credentials: Vec<serde_json::Value> = serde_json::from_str(&credentials).unwrap();
    assert_eq!(credentials.len(), 1);
    /////////////////////////////////////////////////////////////////////////////////////////////////
    // Verifying Prover's Credential
    thread::sleep(std::time::Duration::from_secs(1));
    let to = time::get_time().sec as u64;
    // Verify proof in a short identifiers
    let proof_request = json!({
        "nonce":"123432421212",
        "name":"proof_req_1",
        "version":"0.1",
        "requested_attributes": json!({
            "attr1_referent": json!({
                "name":"name",
                "restrictions": {
                    "cred_def_id": issuer.cred_def_id,
                }
            })
        }),
        "requested_predicates": json!({
            "predicate1_referent": json!({ "name":"age", "p_type":">=", "p_value":18 })
        }),
        "non_revoked": json!({ "to": to.clone() })
    }).to_string();
    let proof_request = anoncreds::to_unqualified(&proof_request).unwrap();
    let verifier = Verifier::new(&proof_request);
    let proof_json = prover.make_proof(&pool, &proof_request, "attr1_referent", None, to);
    // Verifier verifies revealed attribute
    verifier.verify_revealed(&proof_json, "attr1_referent", "Alex");
    let valid = verifier.verify(&pool, &proof_json);
    assert!(valid);
    // Verify proof in a fully qualified identifiers ("ver": "2.0" proof request)
    let proof_request = json!({
        "nonce":"123432421212",
        "name":"proof_req_1",
        "version":"0.1",
        "requested_attributes": json!({
            "attr1_referent": json!({
                "name":"name",
                "restrictions": {
                    "cred_def_id": issuer.cred_def_id.clone()
                }
            })
        }),
        "requested_predicates": json!({
            "predicate1_referent": json!({ "name":"age", "p_type":">=", "p_value":18 })
        }),
        "non_revoked": json!({ "to": to.clone() }),
        "ver": "2.0"
    }).to_string();
    let verifier = Verifier::new(&proof_request);
    let proof_json = prover.make_proof(&pool, &proof_request, "attr1_referent", None, to);
    // Verifier verifies revealed attribute
    verifier.verify_revealed(&proof_json, "attr1_referent", "Alex");
    let valid = verifier.verify(&pool, &proof_json);
    assert!(valid);
    issuer.close();
    prover.close();
    pool.close();
}
#[cfg(feature = "revocation_tests")]
#[test]
fn anoncreds_revocation_interaction_test_issuance_by_demand_fully_qualified_issuer_unqualified_prover() {
    let setup = Setup::empty();
    let pool = Pool::new(&setup.name);
    let (wallet_handle, wallet_config) = wallet::create_and_open_default_wallet(format!("wallet_for_pool_{}", pool.pool_handle).borrow()).unwrap();
    // Issuer uses a fully-qualified DID published via the v1 trustee helper.
    let mut issuer = Issuer {
        // Issuer creates wallet, gets wallet handle
        issuer_wallet_handle: wallet_handle,
        issuer_wallet_config: wallet_config,
        // Issuer create DID
        issuer_did: did::create_store_and_publish_my_did_from_trustee_v1(wallet_handle, pool.pool_handle).unwrap().0,
        schema_id: String::new(),
        rev_reg_id: String::new(),
        cred_def_id: String::new(),
        issuance_type: String::new(),
        tails_writer_config: anoncreds::tails_writer_config(),
    };
    // Prover creates wallet, gets wallet handle
    let (prover_wallet_handle, prover_wallet_config) = wallet::create_and_open_default_wallet("interactions_prover").unwrap();
    // Prover create DID — unqualified (default "{}" DID info)
    let (prover_did, prover_verkey) = did::create_my_did(prover_wallet_handle, "{}").unwrap();
    // Prover creates Master Secret
    let master_secret_id = COMMON_MASTER_SECRET;
    anoncreds::prover_create_master_secret(prover_wallet_handle, COMMON_MASTER_SECRET).unwrap();
    let mut prover = Prover {
        wallet_handle: prover_wallet_handle,
        wallet_config: prover_wallet_config,
        did: prover_did.clone(),
        verkey: prover_verkey.clone(),
        master_secret_id: String::from(master_secret_id),
        cred_def_id: None,
        cred_req_metadata_json: None,
    };
    // Issuer publish Prover DID
    pool.submit_nym(&issuer.issuer_did, issuer.issuer_wallet_handle, &prover.did, Some(&prover.verkey));
    // ISSUER post to Ledger Schema, CredentialDefinition, RevocationRegistry
    issuer.create_initial_ledger_state(&pool, r#"{"max_cred_num":5, "issuance_type":"ISSUANCE_ON_DEMAND"}"#);
    ///////////////////////////////////////////////////////////////////////////////////////////////////////
    // Issuance Credential for Prover
    // Issuer creates Credential Offer
    let cred_offer_json = issuer.make_credential_offer();
    // Issuer disqualifies Credential Offer so the unqualified prover can accept it
    let cred_offer_json = anoncreds::to_unqualified(&cred_offer_json).unwrap();
    // Prover makes credential request
    let cred_req_json = prover.make_credential_request(&pool, &cred_offer_json);
    // Issuer issues credential
    let (cred_json, _, _) = issuer.issue_credential(&pool, &cred_offer_json, &cred_req_json, &anoncreds::gvt_credential_values_json());
    // Prover stores credentials
    prover.store_credentials(&pool, &cred_json, CREDENTIAL1_ID);
    // Basic check
    let credentials = anoncreds::prover_get_credentials(prover.wallet_handle, &json!({"schema_name": GVT_SCHEMA_NAME}).to_string()).unwrap();
    let credentials: Vec<serde_json::Value> = serde_json::from_str(&credentials).unwrap();
    assert_eq!(credentials.len(), 1);
    /////////////////////////////////////////////////////////////////////////////////////////////////
    // Verifying Prover's Credential
    thread::sleep(std::time::Duration::from_secs(1));
    let to = time::get_time().sec as u64;
    // Verify proof in a short identifiers
    let proof_request = json!({
        "nonce":"123432421212",
        "name":"proof_req_1",
        "version":"0.1",
        "requested_attributes": json!({
            "attr1_referent": json!({
                "name":"name",
                "restrictions": {
                    "cred_def_id": issuer.cred_def_id
                }
            })
        }),
        "requested_predicates": json!({
            "predicate1_referent": json!({ "name":"age", "p_type":">=", "p_value":18 })
        }),
        "non_revoked": json!({ "to": to.clone() })
    }).to_string();
    let proof_request = anoncreds::to_unqualified(&proof_request).unwrap();
    let verifier = Verifier::new(&proof_request);
    let proof_json = prover.make_proof(&pool, &proof_request, "attr1_referent", None, to);
    // Verifier verifies revealed attribute
    verifier.verify_revealed(&proof_json, "attr1_referent", "Alex");
    let valid = verifier.verify(&pool, &proof_json);
    assert!(valid);
    issuer.close();
    prover.close();
    pool.close();
}
| 40.805375 | 186 | 0.628972 |
db173fca80e66389694342911f65b5ecb87b9ffb | 40,160 | use crate::check::{check_message, inappropriate_handshake_message, inappropriate_message};
use crate::conn::{CommonState, ConnectionRandoms, State};
use crate::error::Error;
use crate::hash_hs::{HandshakeHash, HandshakeHashBuffer};
use crate::kx;
#[cfg(feature = "logging")]
use crate::log::{debug, trace, warn};
use crate::msgs::base::{Payload, PayloadU8};
use crate::msgs::ccs::ChangeCipherSpecPayload;
use crate::msgs::codec::Codec;
use crate::msgs::enums::KeyUpdateRequest;
use crate::msgs::enums::{AlertDescription, NamedGroup, ProtocolVersion};
use crate::msgs::enums::{ContentType, ExtensionType, HandshakeType, SignatureScheme};
use crate::msgs::handshake::ClientExtension;
use crate::msgs::handshake::DigitallySignedStruct;
use crate::msgs::handshake::EncryptedExtensions;
use crate::msgs::handshake::NewSessionTicketPayloadTLS13;
use crate::msgs::handshake::{CertificateEntry, CertificatePayloadTLS13};
use crate::msgs::handshake::{HandshakeMessagePayload, HandshakePayload};
use crate::msgs::handshake::{HasServerExtensions, ServerHelloPayload};
use crate::msgs::handshake::{PresharedKeyIdentity, PresharedKeyOffer};
use crate::msgs::message::{Message, MessagePayload};
use crate::msgs::persist;
use crate::tls13::key_schedule::{
KeyScheduleEarly, KeyScheduleHandshake, KeySchedulePreHandshake, KeyScheduleTraffic,
};
use crate::tls13::Tls13CipherSuite;
use crate::verify;
#[cfg(feature = "quic")]
use crate::{conn::Protocol, msgs::base::PayloadU16, quic};
use crate::{sign, KeyLog};
use super::client_conn::ClientConnectionData;
use super::hs::ClientContext;
use crate::client::common::ServerCertDetails;
use crate::client::common::{ClientAuthDetails, ClientHelloDetails};
use crate::client::{hs, ClientConfig, ServerName};
use crate::ticketer::TimeBase;
use ring::constant_time;
use std::sync::Arc;
// Extensions we expect in plaintext in the ServerHello.
// In TLS1.3 everything else must arrive encrypted (or not at all).
static ALLOWED_PLAINTEXT_EXTS: &[ExtensionType] = &[
    ExtensionType::KeyShare,
    ExtensionType::PreSharedKey,
    ExtensionType::SupportedVersions,
];
// Only the intersection of things we offer, and those disallowed
// in TLS1.3.  These are TLS1.2-era extensions; a server echoing any of
// them in EncryptedExtensions is misbehaving.
static DISALLOWED_TLS13_EXTS: &[ExtensionType] = &[
    ExtensionType::ECPointFormats,
    ExtensionType::SessionTicket,
    ExtensionType::RenegotiationInfo,
    ExtensionType::ExtendedMasterSecret,
];
/// Drive the client state machine after a TLS1.3 ServerHello is received.
///
/// Validates the hello, completes the (EC)DHE exchange against the server's
/// key share, decides whether PSK resumption was accepted, derives the
/// handshake traffic secrets, installs the record-layer keys, and
/// transitions to waiting for EncryptedExtensions.
pub(super) fn handle_server_hello(
    config: Arc<ClientConfig>,
    cx: &mut ClientContext,
    server_hello: &ServerHelloPayload,
    mut resuming_session: Option<persist::Tls13ClientSessionValue>,
    server_name: ServerName,
    randoms: ConnectionRandoms,
    suite: &'static Tls13CipherSuite,
    transcript: HandshakeHash,
    early_key_schedule: Option<KeyScheduleEarly>,
    hello: ClientHelloDetails,
    our_key_share: kx::KeyExchange,
    mut sent_tls13_fake_ccs: bool,
) -> hs::NextStateOrError {
    validate_server_hello(cx.common, server_hello)?;
    let their_key_share = server_hello
        .get_key_share()
        .ok_or_else(|| {
            cx.common
                .send_fatal_alert(AlertDescription::MissingExtension);
            Error::PeerMisbehavedError("missing key share".to_string())
        })?;
    // The server's share must be in the group we actually offered.
    if our_key_share.group() != their_key_share.group {
        return Err(cx
            .common
            .illegal_param("wrong group for key share"));
    }
    // PSK path only if the server selected one AND we offered one.
    let key_schedule_pre_handshake = if let (Some(selected_psk), Some(early_key_schedule)) =
        (server_hello.get_psk_index(), early_key_schedule)
    {
        if let Some(ref resuming) = resuming_session {
            let resuming_suite = match suite.can_resume_from(resuming.suite()) {
                Some(resuming) => resuming,
                None => {
                    return Err(cx
                        .common
                        .illegal_param("server resuming incompatible suite"));
                }
            };
            // If the server varies the suite here, we will have encrypted early data with
            // the wrong suite.
            if cx.data.early_data.is_enabled() && resuming_suite != suite {
                return Err(cx
                    .common
                    .illegal_param("server varied suite with early data"));
            }
            // We only offered one PSK identity, so index 0 is the only valid choice.
            if selected_psk != 0 {
                return Err(cx
                    .common
                    .illegal_param("server selected invalid psk"));
            }
            debug!("Resuming using PSK");
            // The key schedule has been initialized and set in fill_in_psk_binder()
        } else {
            return Err(Error::PeerMisbehavedError(
                "server selected unoffered psk".to_string(),
            ));
        }
        KeySchedulePreHandshake::from(early_key_schedule)
    } else {
        debug!("Not resuming");
        // Discard the early data key schedule.
        cx.data.early_data.rejected();
        cx.common.early_traffic = false;
        resuming_session.take();
        KeySchedulePreHandshake::new(suite.hkdf_algorithm)
    };
    // Mix the (EC)DHE shared secret into the key schedule.
    let key_schedule = our_key_share.complete(&their_key_share.payload.0, |secret| {
        Ok(key_schedule_pre_handshake.into_handshake(secret))
    })?;
    // Remember what KX group the server liked for next time.
    save_kx_hint(&config, &server_name, their_key_share.group);
    // If we change keying when a subsequent handshake message is being joined,
    // the two halves will have different record layer protections. Disallow this.
    cx.common.check_aligned_handshake()?;
    let hash_at_client_recvd_server_hello = transcript.get_current_hash();
    let (key_schedule, client_key, server_key) = key_schedule.derive_handshake_secrets(
        hash_at_client_recvd_server_hello,
        &*config.key_log,
        &randoms.client,
    );
    // Decrypt with the peer's key, encrypt with our own key
    cx.common
        .record_layer
        .set_message_decrypter(suite.derive_decrypter(&server_key));
    if !cx.data.early_data.is_enabled() {
        // Set the client encryption key for handshakes if early data is not used
        cx.common
            .record_layer
            .set_message_encrypter(suite.derive_encrypter(&client_key));
    }
    #[cfg(feature = "quic")]
    {
        cx.common.quic.hs_secrets = Some(quic::Secrets::new(client_key, server_key, suite, true));
    }
    // Middlebox compatibility: a one-time plaintext ChangeCipherSpec (no-op for QUIC).
    emit_fake_ccs(&mut sent_tls13_fake_ccs, cx.common);
    Ok(Box::new(ExpectEncryptedExtensions {
        config,
        resuming_session,
        server_name,
        randoms,
        suite,
        transcript,
        key_schedule,
        hello,
    }))
}
fn validate_server_hello(
common: &mut CommonState,
server_hello: &ServerHelloPayload,
) -> Result<(), Error> {
for ext in &server_hello.extensions {
if !ALLOWED_PLAINTEXT_EXTS.contains(&ext.get_type()) {
common.send_fatal_alert(AlertDescription::UnsupportedExtension);
return Err(Error::PeerMisbehavedError(
"server sent unexpected cleartext ext".to_string(),
));
}
}
Ok(())
}
/// Choose the key-exchange group for the initial ClientHello key share.
///
/// Prefers the group remembered (via `save_kx_hint`) from a previous
/// handshake with this server, provided it is still configured; otherwise
/// falls back to the first configured group.
pub(super) fn initial_key_share(
    config: &ClientConfig,
    server_name: &ServerName,
) -> Result<kx::KeyExchange, Error> {
    let hint_key = persist::ClientSessionKey::hint_for_server_name(server_name).get_encoding();

    // A stored hint is only honoured if it decodes and is still in our config.
    let hinted = config
        .session_storage
        .get(&hint_key)
        .and_then(|enc| NamedGroup::read_bytes(&enc))
        .and_then(|group| kx::KeyExchange::choose(group, &config.kx_groups));

    let group = match hinted {
        Some(group) => group,
        None => config
            .kx_groups
            .first()
            .expect("No kx groups configured"),
    };

    kx::KeyExchange::start(group).ok_or(Error::FailedToGetRandomBytes)
}
/// Remember which key-exchange group this server accepted, keyed by server
/// name, so the next ClientHello can offer a share for it immediately.
fn save_kx_hint(config: &ClientConfig, server_name: &ServerName, group: NamedGroup) {
    let hint_key = persist::ClientSessionKey::hint_for_server_name(server_name).get_encoding();
    config
        .session_storage
        .put(hint_key, group.get_encoding());
}
/// This implements the horrifying TLS1.3 hack where PSK binders have a
/// data dependency on the message they are contained within: the binder
/// is an HMAC over the ClientHello up to (but excluding) the binder list,
/// so it can only be computed after the rest of the hello is encoded.
///
/// Returns the early key schedule so it can be carried forward if the
/// server accepts this PSK.
pub(super) fn fill_in_psk_binder(
    resuming: &persist::Tls13ClientSessionValue,
    transcript: &HandshakeHashBuffer,
    hmp: &mut HandshakeMessagePayload,
) -> KeyScheduleEarly {
    // We need to know the hash function of the suite we're trying to resume into.
    let hkdf_alg = resuming.suite().hkdf_algorithm;
    let suite_hash = resuming.suite().hash_algorithm();
    // The binder is calculated over the clienthello, but doesn't include itself or its
    // length, or the length of its container.
    let binder_plaintext = hmp.get_encoding_for_binder_signing();
    let handshake_hash = transcript.get_hash_given(suite_hash, &binder_plaintext);
    // Run a fake key_schedule to simulate what the server will do if it chooses
    // to resume.
    let key_schedule = KeyScheduleEarly::new(hkdf_alg, resuming.secret());
    let real_binder = key_schedule.resumption_psk_binder_key_and_sign_verify_data(&handshake_hash);
    if let HandshakePayload::ClientHello(ref mut ch) = hmp.payload {
        ch.set_psk_binder(real_binder.as_ref());
    };
    key_schedule
}
/// Append the pre_shared_key (and, when enabled, early_data) extensions for
/// a TLS1.3 ticket resumption to `exts`.  The PSK binder is zeroed here and
/// filled in later by `fill_in_psk_binder()`, because it signs the very
/// message it lives in.
pub(super) fn prepare_resumption(
    config: &ClientConfig,
    cx: &mut ClientContext<'_>,
    ticket: Vec<u8>,
    resuming_session: &persist::Retrieved<&persist::Tls13ClientSessionValue>,
    exts: &mut Vec<ClientExtension>,
    doing_retry: bool,
) {
    let resuming_suite = resuming_session.suite();
    cx.common.suite = Some(resuming_suite.into());
    cx.data.resumption_ciphersuite = Some(resuming_suite.into());
    // The EarlyData extension MUST be supplied together with the
    // PreSharedKey extension.
    let max_early_data_size = resuming_session.max_early_data_size();
    if config.enable_early_data && max_early_data_size > 0 && !doing_retry {
        cx.data
            .early_data
            .enable(max_early_data_size as usize);
        exts.push(ClientExtension::EarlyData);
    }
    // Finally, and only for TLS1.3 with a ticket resumption, include a binder
    // for our ticket. This must go last.
    //
    // Include an empty binder. It gets filled in below because it depends on
    // the message it's contained in (!!!).
    let obfuscated_ticket_age = resuming_session.obfuscated_ticket_age();
    // Binder is sized to the suite's hash output; content is filled in later.
    let binder_len = resuming_suite
        .hash_algorithm()
        .output_len;
    let binder = vec![0u8; binder_len];
    let psk_identity = PresharedKeyIdentity::new(ticket, obfuscated_ticket_age);
    let psk_ext = PresharedKeyOffer::new(psk_identity, binder);
    exts.push(ClientExtension::PresharedKey(psk_ext));
}
/// Derive the client early-traffic secret and install it as the encryption
/// key, enabling 0-RTT application data before the server responds.
pub(super) fn derive_early_traffic_secret(
    key_log: &dyn KeyLog,
    cx: &mut ClientContext<'_>,
    resuming_suite: &'static Tls13CipherSuite,
    early_key_schedule: &KeyScheduleEarly,
    sent_tls13_fake_ccs: &mut bool,
    transcript_buffer: &HandshakeHashBuffer,
    client_random: &[u8; 32],
) {
    // For middlebox compatibility
    emit_fake_ccs(sent_tls13_fake_ccs, cx.common);
    // The early secret is bound to the hash of the ClientHello alone.
    let client_hello_hash = transcript_buffer.get_hash_given(resuming_suite.hash_algorithm(), &[]);
    let client_early_traffic_secret =
        early_key_schedule.client_early_traffic_secret(&client_hello_hash, key_log, client_random);
    // Set early data encryption key
    cx.common
        .record_layer
        .set_message_encrypter(resuming_suite.derive_encrypter(&client_early_traffic_secret));
    #[cfg(feature = "quic")]
    {
        cx.common.quic.early_secret = Some(client_early_traffic_secret);
    }
    // Now the client can send encrypted early data
    cx.common.early_traffic = true;
    trace!("Starting early data traffic");
}
/// Send the middlebox-compatibility ChangeCipherSpec record, at most once
/// per handshake.  QUIC has no TLS record layer, so nothing is sent there
/// (and the once-only flag is left untouched).
pub(super) fn emit_fake_ccs(sent_tls13_fake_ccs: &mut bool, common: &mut CommonState) {
    // `mem::replace` both tests and sets the flag; short-circuit keeps it
    // unset on the QUIC path, exactly like the separate early returns did.
    if common.is_quic() || std::mem::replace(sent_tls13_fake_ccs, true) {
        return;
    }

    common.send_msg(
        Message {
            version: ProtocolVersion::TLSv1_2,
            payload: MessagePayload::ChangeCipherSpec(ChangeCipherSpecPayload {}),
        },
        false,
    );
}
/// Vet the server's EncryptedExtensions: no duplicate extensions, nothing
/// we did not solicit, and nothing that either belongs in plaintext or is
/// banned outright in TLS1.3.  Sends the appropriate fatal alert before
/// returning the error.
fn validate_encrypted_extensions(
    common: &mut CommonState,
    hello: &ClientHelloDetails,
    exts: &EncryptedExtensions,
) -> Result<(), Error> {
    if exts.has_duplicate_extension() {
        common.send_fatal_alert(AlertDescription::DecodeError);
        return Err(Error::PeerMisbehavedError(
            "server sent duplicate encrypted extensions".to_string(),
        ));
    }

    if hello.server_sent_unsolicited_extensions(exts, &[]) {
        common.send_fatal_alert(AlertDescription::UnsupportedExtension);
        return Err(Error::PeerMisbehavedError(
            "server sent unsolicited encrypted extension".to_string(),
        ));
    }

    let inappropriate = exts.iter().any(|ext| {
        let typ = ext.get_type();
        ALLOWED_PLAINTEXT_EXTS.contains(&typ) || DISALLOWED_TLS13_EXTS.contains(&typ)
    });
    if inappropriate {
        common.send_fatal_alert(AlertDescription::UnsupportedExtension);
        return Err(Error::PeerMisbehavedError(
            "server sent inappropriate encrypted extension".to_string(),
        ));
    }

    Ok(())
}
/// State: ServerHello processed and handshake keys installed; waiting for
/// the server's EncryptedExtensions message.
struct ExpectEncryptedExtensions {
    config: Arc<ClientConfig>,
    // Present only when the server accepted our PSK offer.
    resuming_session: Option<persist::Tls13ClientSessionValue>,
    server_name: ServerName,
    randoms: ConnectionRandoms,
    suite: &'static Tls13CipherSuite,
    transcript: HandshakeHash,
    key_schedule: KeyScheduleHandshake,
    // What we offered, used to detect unsolicited server extensions.
    hello: ClientHelloDetails,
}
impl State<ClientConnectionData> for ExpectEncryptedExtensions {
    /// Process EncryptedExtensions: validate them, settle ALPN and QUIC
    /// parameters, resolve the early-data outcome, then move either to
    /// ExpectFinished (resumption) or ExpectCertificateOrCertReq (full
    /// handshake).
    fn handle(mut self: Box<Self>, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError {
        let exts = require_handshake_msg!(
            m,
            HandshakeType::EncryptedExtensions,
            HandshakePayload::EncryptedExtensions
        )?;
        debug!("TLS1.3 encrypted extensions: {:?}", exts);
        self.transcript.add_message(&m);
        validate_encrypted_extensions(cx.common, &self.hello, exts)?;
        hs::process_alpn_protocol(cx.common, &self.config, exts.get_alpn_protocol())?;
        #[cfg(feature = "quic")]
        {
            // QUIC transport parameters
            if cx.common.is_quic() {
                match exts.get_quic_params_extension() {
                    Some(params) => cx.common.quic.params = Some(params),
                    None => {
                        return Err(cx
                            .common
                            .missing_extension("QUIC transport parameters not found"));
                    }
                }
            }
        }
        if let Some(resuming_session) = self.resuming_session {
            // Early data is accepted only if the server echoed the extension.
            let was_early_traffic = cx.common.early_traffic;
            if was_early_traffic {
                if exts.early_data_extension_offered() {
                    cx.data.early_data.accepted();
                } else {
                    cx.data.early_data.rejected();
                    cx.common.early_traffic = false;
                }
            }
            if was_early_traffic && !cx.common.early_traffic {
                // If no early traffic, set the encryption key for handshakes
                cx.common
                    .record_layer
                    .set_message_encrypter(
                        self.suite
                            .derive_encrypter(self.key_schedule.client_key()),
                    );
            }
            cx.common.peer_certificates = Some(
                resuming_session
                    .server_cert_chain()
                    .to_vec(),
            );
            // We *don't* reverify the certificate chain here: resumption is a
            // continuation of the previous session in terms of security policy.
            let cert_verified = verify::ServerCertVerified::assertion();
            let sig_verified = verify::HandshakeSignatureValid::assertion();
            Ok(Box::new(ExpectFinished {
                config: self.config,
                server_name: self.server_name,
                randoms: self.randoms,
                suite: self.suite,
                transcript: self.transcript,
                key_schedule: self.key_schedule,
                client_auth: None,
                cert_verified,
                sig_verified,
            }))
        } else {
            // Without an accepted PSK, early_data in EncryptedExtensions is illegal.
            if exts.early_data_extension_offered() {
                let msg = "server sent early data extension without resumption".to_string();
                return Err(Error::PeerMisbehavedError(msg));
            }
            Ok(Box::new(ExpectCertificateOrCertReq {
                config: self.config,
                server_name: self.server_name,
                randoms: self.randoms,
                suite: self.suite,
                transcript: self.transcript,
                key_schedule: self.key_schedule,
                may_send_sct_list: self.hello.server_may_send_sct_list(),
            }))
        }
    }
}
/// State: full (non-resumed) handshake; the next message is either the
/// server Certificate or, if the server wants client auth, a
/// CertificateRequest.  Dispatches to the matching state.
struct ExpectCertificateOrCertReq {
    config: Arc<ClientConfig>,
    server_name: ServerName,
    randoms: ConnectionRandoms,
    suite: &'static Tls13CipherSuite,
    transcript: HandshakeHash,
    key_schedule: KeyScheduleHandshake,
    // Whether we offered the SCT extension, so the certificate may carry SCTs.
    may_send_sct_list: bool,
}
impl State<ClientConnectionData> for ExpectCertificateOrCertReq {
    /// Peek at the handshake type and delegate the very same message to the
    /// concrete ExpectCertificate or ExpectCertificateRequest state.
    fn handle(self: Box<Self>, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError {
        // Anything other than Certificate / CertificateRequest is rejected here.
        check_message(
            &m,
            &[ContentType::Handshake],
            &[
                HandshakeType::Certificate,
                HandshakeType::CertificateRequest,
            ],
        )?;
        if m.is_handshake_type(HandshakeType::Certificate) {
            Box::new(ExpectCertificate {
                config: self.config,
                server_name: self.server_name,
                randoms: self.randoms,
                suite: self.suite,
                transcript: self.transcript,
                key_schedule: self.key_schedule,
                may_send_sct_list: self.may_send_sct_list,
                client_auth: None,
            })
            .handle(cx, m)
        } else {
            Box::new(ExpectCertificateRequest {
                config: self.config,
                server_name: self.server_name,
                randoms: self.randoms,
                suite: self.suite,
                transcript: self.transcript,
                key_schedule: self.key_schedule,
                may_send_sct_list: self.may_send_sct_list,
            })
            .handle(cx, m)
        }
    }
}
// TLS1.3 version of CertificateRequest handling. We then move to expecting the server
// Certificate. Unfortunately the CertificateRequest type changed in an annoying way
// in TLS1.3.
struct ExpectCertificateRequest {
    config: Arc<ClientConfig>,
    server_name: ServerName,
    randoms: ConnectionRandoms,
    suite: &'static Tls13CipherSuite,
    transcript: HandshakeHash,
    key_schedule: KeyScheduleHandshake,
    may_send_sct_list: bool,
}
impl State<ClientConnectionData> for ExpectCertificateRequest {
    // Record the request, select a client certificate/signing scheme if we
    // can, then move on to expecting the server Certificate.
    fn handle(mut self: Box<Self>, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError {
        let certreq = &require_handshake_msg!(
            m,
            HandshakeType::CertificateRequest,
            HandshakePayload::CertificateRequestTLS13
        )?;
        self.transcript.add_message(&m);
        debug!("Got CertificateRequest {:?}", certreq);
        // Fortunately the problems here in TLS1.2 and prior are corrected in
        // TLS1.3.
        // Must be empty during handshake.
        if !certreq.context.0.is_empty() {
            warn!("Server sent non-empty certreq context");
            cx.common
                .send_fatal_alert(AlertDescription::DecodeError);
            return Err(Error::CorruptMessagePayload(ContentType::Handshake));
        }
        // Intersect the server's offered signature schemes with the ones we
        // can actually sign with under TLS1.3.
        let tls13_sign_schemes = sign::supported_sign_tls13();
        let no_sigschemes = Vec::new();
        let compat_sigschemes = certreq
            .get_sigalgs_extension()
            .unwrap_or(&no_sigschemes)
            .iter()
            .cloned()
            .filter(|scheme| tls13_sign_schemes.contains(scheme))
            .collect::<Vec<SignatureScheme>>();
        if compat_sigschemes.is_empty() {
            cx.common
                .send_fatal_alert(AlertDescription::HandshakeFailure);
            return Err(Error::PeerIncompatibleError(
                "server sent bad certreq schemes".to_string(),
            ));
        }
        // Authority names offered by the server, as raw byte slices.
        let no_canames = Vec::new();
        let canames = certreq
            .get_authorities_extension()
            .unwrap_or(&no_canames)
            .iter()
            .map(|p| p.0.as_slice())
            .collect::<Vec<&[u8]>>();
        let maybe_certkey = self
            .config
            .client_auth_cert_resolver
            .resolve(&canames, &compat_sigschemes);
        // Note: failure to select a certificate is not an error here; we
        // proceed and later send an empty Certificate message.
        let mut client_auth = ClientAuthDetails::new();
        if let Some(certkey) = maybe_certkey {
            debug!("Attempting client auth");
            let maybe_signer = certkey
                .key
                .choose_scheme(&compat_sigschemes);
            client_auth.certkey = Some(certkey);
            client_auth.signer = maybe_signer;
            client_auth.auth_context = Some(certreq.context.0.clone());
        } else {
            debug!("Client auth requested but no cert selected");
        }
        Ok(Box::new(ExpectCertificate {
            config: self.config,
            server_name: self.server_name,
            randoms: self.randoms,
            suite: self.suite,
            transcript: self.transcript,
            key_schedule: self.key_schedule,
            may_send_sct_list: self.may_send_sct_list,
            client_auth: Some(client_auth),
        }))
    }
}
// Waiting for the server Certificate message. `client_auth` is Some iff a
// CertificateRequest was received earlier in this handshake.
struct ExpectCertificate {
    config: Arc<ClientConfig>,
    server_name: ServerName,
    randoms: ConnectionRandoms,
    suite: &'static Tls13CipherSuite,
    transcript: HandshakeHash,
    key_schedule: KeyScheduleHandshake,
    may_send_sct_list: bool,
    client_auth: Option<ClientAuthDetails>,
}
impl State<ClientConnectionData> for ExpectCertificate {
    // Validate the shape of the server's certificate chain (context,
    // extensions, SCTs); actual chain verification happens in the next
    // state once CertificateVerify arrives.
    fn handle(mut self: Box<Self>, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError {
        let cert_chain = require_handshake_msg!(
            m,
            HandshakeType::Certificate,
            HandshakePayload::CertificateTLS13
        )?;
        self.transcript.add_message(&m);
        // This is only non-empty for client auth.
        if !cert_chain.context.0.is_empty() {
            warn!("certificate with non-empty context during handshake");
            cx.common
                .send_fatal_alert(AlertDescription::DecodeError);
            return Err(Error::CorruptMessagePayload(ContentType::Handshake));
        }
        if cert_chain.any_entry_has_duplicate_extension()
            || cert_chain.any_entry_has_unknown_extension()
        {
            warn!("certificate chain contains unsolicited/unknown extension");
            cx.common
                .send_fatal_alert(AlertDescription::UnsupportedExtension);
            return Err(Error::PeerMisbehavedError(
                "bad cert chain extensions".to_string(),
            ));
        }
        let server_cert = ServerCertDetails::new(
            cert_chain.convert(),
            cert_chain.get_end_entity_ocsp(),
            cert_chain.get_end_entity_scts(),
        );
        // SCTs are only acceptable if we offered the SCT extension.
        if let Some(sct_list) = server_cert.scts.as_ref() {
            if hs::sct_list_is_invalid(sct_list) {
                let error_msg = "server sent invalid SCT list".to_string();
                return Err(Error::PeerMisbehavedError(error_msg));
            }
            if !self.may_send_sct_list {
                let error_msg = "server sent unsolicited SCT list".to_string();
                return Err(Error::PeerMisbehavedError(error_msg));
            }
        }
        Ok(Box::new(ExpectCertificateVerify {
            config: self.config,
            server_name: self.server_name,
            randoms: self.randoms,
            suite: self.suite,
            transcript: self.transcript,
            key_schedule: self.key_schedule,
            server_cert,
            client_auth: self.client_auth,
        }))
    }
}
// --- TLS1.3 CertificateVerify ---
// Verifies the server's certificate chain against the configured verifier,
// then the server's signature over the handshake transcript.
struct ExpectCertificateVerify {
    config: Arc<ClientConfig>,
    server_name: ServerName,
    randoms: ConnectionRandoms,
    suite: &'static Tls13CipherSuite,
    transcript: HandshakeHash,
    key_schedule: KeyScheduleHandshake,
    server_cert: ServerCertDetails,
    client_auth: Option<ClientAuthDetails>,
}
impl State<ClientConnectionData> for ExpectCertificateVerify {
    fn handle(mut self: Box<Self>, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError {
        let cert_verify = require_handshake_msg!(
            m,
            HandshakeType::CertificateVerify,
            HandshakePayload::CertificateVerify
        )?;
        trace!("Server cert is {:?}", self.server_cert.cert_chain);
        // 1. Verify the certificate chain.
        let (end_entity, intermediates) = self
            .server_cert
            .cert_chain
            .split_first()
            .ok_or(Error::NoCertificatesPresented)?;
        let now = std::time::SystemTime::now();
        let cert_verified = self
            .config
            .verifier
            .verify_server_cert(
                end_entity,
                intermediates,
                &self.server_name,
                &mut self.server_cert.scts(),
                &self.server_cert.ocsp_response,
                now,
            )
            .map_err(|err| hs::send_cert_error_alert(cx.common, err))?;
        // 2. Verify their signature on the handshake.
        // The signature covers the transcript hash *before* this message is
        // added to the transcript (add_message happens below).
        let handshake_hash = self.transcript.get_current_hash();
        let sig_verified = self
            .config
            .verifier
            .verify_tls13_signature(
                &verify::construct_tls13_server_verify_message(&handshake_hash),
                &self.server_cert.cert_chain[0],
                cert_verify,
            )
            .map_err(|err| hs::send_cert_error_alert(cx.common, err))?;
        cx.common.peer_certificates = Some(self.server_cert.cert_chain);
        self.transcript.add_message(&m);
        Ok(Box::new(ExpectFinished {
            config: self.config,
            server_name: self.server_name,
            randoms: self.randoms,
            suite: self.suite,
            transcript: self.transcript,
            key_schedule: self.key_schedule,
            client_auth: self.client_auth,
            cert_verified,
            sig_verified,
        }))
    }
}
/// Send our Certificate message (TLS1.3 shape), echoing back the server's
/// certificate request context. The entry list is empty when no client
/// certificate was selected.
fn emit_certificate_tls13(
    transcript: &mut HandshakeHash,
    client_auth: &mut ClientAuthDetails,
    common: &mut CommonState,
) {
    let context = client_auth
        .auth_context
        .take()
        .unwrap_or_default();
    // Build the entry list from the selected key, if any.
    let entries = match &client_auth.certkey {
        Some(cert_key) => cert_key
            .cert
            .iter()
            .map(|cert| CertificateEntry::new(cert.clone()))
            .collect(),
        None => Vec::new(),
    };
    let cert_msg = Message {
        version: ProtocolVersion::TLSv1_3,
        payload: MessagePayload::Handshake(HandshakeMessagePayload {
            typ: HandshakeType::Certificate,
            payload: HandshakePayload::CertificateTLS13(CertificatePayloadTLS13 {
                context: PayloadU8::new(context),
                entries,
            }),
        }),
    };
    transcript.add_message(&cert_msg);
    common.send_msg(cert_msg, true);
}
/// Send CertificateVerify proving possession of the client private key.
/// Silently succeeds without sending anything when no usable signing
/// scheme/key was selected earlier.
fn emit_certverify_tls13(
    transcript: &mut HandshakeHash,
    client_auth: &mut ClientAuthDetails,
    common: &mut CommonState,
) -> Result<(), Error> {
    let signer = if let Some(signer) = client_auth.signer.take() {
        signer
    } else {
        debug!("Skipping certverify message (no client scheme/key)");
        return Ok(());
    };
    // Sign the client-auth transcript hash under the chosen scheme.
    let to_sign = verify::construct_tls13_client_verify_message(&transcript.get_current_hash());
    let scheme = signer.scheme();
    let body = DigitallySignedStruct::new(scheme, signer.sign(&to_sign)?);
    let verify_msg = Message {
        version: ProtocolVersion::TLSv1_3,
        payload: MessagePayload::Handshake(HandshakeMessagePayload {
            typ: HandshakeType::CertificateVerify,
            payload: HandshakePayload::CertificateVerify(body),
        }),
    };
    transcript.add_message(&verify_msg);
    common.send_msg(verify_msg, true);
    Ok(())
}
/// Send our Finished message carrying the given HMAC over the transcript.
fn emit_finished_tls13(
    transcript: &mut HandshakeHash,
    verify_data: ring::hmac::Tag,
    common: &mut CommonState,
) {
    let finished = Message {
        version: ProtocolVersion::TLSv1_3,
        payload: MessagePayload::Handshake(HandshakeMessagePayload {
            typ: HandshakeType::Finished,
            payload: HandshakePayload::Finished(Payload::new(verify_data.as_ref())),
        }),
    };
    transcript.add_message(&finished);
    common.send_msg(finished, true);
}
/// Send EndOfEarlyData to close the 0-RTT stream. Skipped for QUIC, which
/// does not use this message.
fn emit_end_of_early_data_tls13(transcript: &mut HandshakeHash, common: &mut CommonState) {
    if common.is_quic() {
        return;
    }
    let eoed = Message {
        version: ProtocolVersion::TLSv1_3,
        payload: MessagePayload::Handshake(HandshakeMessagePayload {
            typ: HandshakeType::EndOfEarlyData,
            payload: HandshakePayload::EndOfEarlyData,
        }),
    };
    transcript.add_message(&eoed);
    common.send_msg(eoed, true);
}
// Waiting for the server Finished message; on success this completes the
// handshake: it sends our own auth/Finished flight and installs the
// application traffic keys.
struct ExpectFinished {
    config: Arc<ClientConfig>,
    server_name: ServerName,
    randoms: ConnectionRandoms,
    suite: &'static Tls13CipherSuite,
    transcript: HandshakeHash,
    key_schedule: KeyScheduleHandshake,
    client_auth: Option<ClientAuthDetails>,
    cert_verified: verify::ServerCertVerified,
    sig_verified: verify::HandshakeSignatureValid,
}
impl State<ClientConnectionData> for ExpectFinished {
    fn handle(self: Box<Self>, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError {
        // Unbox once so fields can be moved out piecemeal below.
        let mut st = *self;
        let finished =
            require_handshake_msg!(m, HandshakeType::Finished, HandshakePayload::Finished)?;
        let handshake_hash = st.transcript.get_current_hash();
        let expect_verify_data = st
            .key_schedule
            .sign_server_finish(&handshake_hash);
        // Constant-time comparison of the server's verify_data against ours.
        let fin = constant_time::verify_slices_are_equal(expect_verify_data.as_ref(), &finished.0)
            .map_err(|_| {
                cx.common
                    .send_fatal_alert(AlertDescription::DecryptError);
                Error::DecryptError
            })
            .map(|_| verify::FinishedMessageVerified::assertion())?;
        st.transcript.add_message(&m);
        let hash_after_handshake = st.transcript.get_current_hash();
        /* The EndOfEarlyData message to server is still encrypted with early data keys,
         * but appears in the transcript after the server Finished. */
        if cx.common.early_traffic {
            emit_end_of_early_data_tls13(&mut st.transcript, cx.common);
            cx.common.early_traffic = false;
            cx.data.early_data.finished();
            // Switch our write keys back to handshake keys for the flight
            // below.
            cx.common
                .record_layer
                .set_message_encrypter(
                    st.suite
                        .derive_encrypter(st.key_schedule.client_key()),
                );
        }
        /* Send our authentication/finished messages.  These are still encrypted
         * with our handshake keys. */
        if let Some(client_auth) = &mut st.client_auth {
            emit_certificate_tls13(&mut st.transcript, client_auth, cx.common);
            emit_certverify_tls13(&mut st.transcript, client_auth, cx.common)?;
        }
        let (key_schedule_finished, client_key, server_key) = st
            .key_schedule
            .into_traffic_with_client_finished_pending(
                hash_after_handshake,
                &*st.config.key_log,
                &st.randoms.client,
            );
        // Re-hash: the transcript now includes our auth flight.
        let handshake_hash = st.transcript.get_current_hash();
        let (key_schedule_traffic, verify_data, _) =
            key_schedule_finished.sign_client_finish(&handshake_hash);
        emit_finished_tls13(&mut st.transcript, verify_data, cx.common);
        /* Now move to our application traffic keys. */
        cx.common.check_aligned_handshake()?;
        cx.common
            .record_layer
            .set_message_decrypter(st.suite.derive_decrypter(&server_key));
        cx.common
            .record_layer
            .set_message_encrypter(st.suite.derive_encrypter(&client_key));
        cx.common.start_traffic();
        let st = ExpectTraffic {
            config: st.config,
            server_name: st.server_name,
            suite: st.suite,
            transcript: st.transcript,
            key_schedule: key_schedule_traffic,
            want_write_key_update: false,
            _cert_verified: st.cert_verified,
            _sig_verified: st.sig_verified,
            _fin_verified: fin,
        };
        #[cfg(feature = "quic")]
        {
            if cx.common.protocol == Protocol::Quic {
                cx.common.quic.traffic_secrets =
                    Some(quic::Secrets::new(client_key, server_key, st.suite, true));
                return Ok(Box::new(ExpectQuicTraffic(st)));
            }
        }
        Ok(Box::new(st))
    }
}
// -- Traffic transit state (TLS1.3) --
// In this state we can be sent tickets, key updates,
// and application data.
struct ExpectTraffic {
    config: Arc<ClientConfig>,
    server_name: ServerName,
    suite: &'static Tls13CipherSuite,
    transcript: HandshakeHash,
    key_schedule: KeyScheduleTraffic,
    want_write_key_update: bool,
    _cert_verified: verify::ServerCertVerified,
    _sig_verified: verify::HandshakeSignatureValid,
    _fin_verified: verify::FinishedMessageVerified,
}
impl ExpectTraffic {
    #[allow(clippy::unnecessary_wraps)] // returns Err for #[cfg(feature = "quic")]
    // Handle a NewSessionTicket: derive the resumption PSK from the current
    // transcript and persist the ticket via the configured session storage.
    // Failure to save is logged, not an error.
    fn handle_new_ticket_tls13(
        &mut self,
        cx: &mut ClientContext<'_>,
        nst: &NewSessionTicketPayloadTLS13,
    ) -> Result<(), Error> {
        let handshake_hash = self.transcript.get_current_hash();
        let secret = self
            .key_schedule
            .resumption_master_secret_and_derive_ticket_psk(&handshake_hash, &nst.nonce.0);
        let time_now = match TimeBase::now() {
            Ok(t) => t,
            #[allow(unused_variables)]
            Err(e) => {
                debug!("Session not saved: {}", e);
                return Ok(());
            }
        };
        let value = persist::Tls13ClientSessionValue::new(
            self.suite,
            nst.ticket.0.clone(),
            secret,
            cx.common
                .peer_certificates
                .clone()
                .unwrap_or_default(),
            time_now,
            nst.lifetime,
            nst.age_add,
            nst.get_max_early_data_size()
                .unwrap_or_default(),
        );
        // For QUIC, max_early_data_size must be absent, 0, or the sentinel
        // 0xffff_ffff; any other value is a protocol violation.
        #[cfg(feature = "quic")]
        if let Some(sz) = nst.get_max_early_data_size() {
            if cx.common.protocol == Protocol::Quic && sz != 0 && sz != 0xffff_ffff {
                return Err(Error::PeerMisbehavedError(
                    "invalid max_early_data_size".into(),
                ));
            }
        }
        let key = persist::ClientSessionKey::session_for_server_name(&self.server_name);
        #[allow(unused_mut)]
        let mut ticket = value.get_encoding();
        // QUIC transport parameters are appended to the stored ticket so a
        // later 0-RTT attempt can validate them.
        #[cfg(feature = "quic")]
        if let (Protocol::Quic, Some(ref quic_params)) =
            (cx.common.protocol, &cx.common.quic.params)
        {
            PayloadU16::encode_slice(quic_params, &mut ticket);
        }
        let worked = self
            .config
            .session_storage
            .put(key.get_encoding(), ticket);
        if worked {
            debug!("Ticket saved");
        } else {
            debug!("Ticket not saved");
        }
        Ok(())
    }
    // Handle a peer KeyUpdate: switch our read keys immediately; if the
    // peer requested it, schedule a write-side update for the next write.
    fn handle_key_update(
        &mut self,
        common: &mut CommonState,
        kur: &KeyUpdateRequest,
    ) -> Result<(), Error> {
        #[cfg(feature = "quic")]
        {
            if let Protocol::Quic = common.protocol {
                common.send_fatal_alert(AlertDescription::UnexpectedMessage);
                let msg = "KeyUpdate received in QUIC connection".to_string();
                warn!("{}", msg);
                return Err(Error::PeerMisbehavedError(msg));
            }
        }
        // Mustn't be interleaved with other handshake messages.
        common.check_aligned_handshake()?;
        match kur {
            KeyUpdateRequest::UpdateNotRequested => {}
            KeyUpdateRequest::UpdateRequested => {
                self.want_write_key_update = true;
            }
            _ => {
                common.send_fatal_alert(AlertDescription::IllegalParameter);
                return Err(Error::CorruptMessagePayload(ContentType::Handshake));
            }
        }
        // Update our read-side keys.
        let new_read_key = self
            .key_schedule
            .next_server_application_traffic_secret();
        common
            .record_layer
            .set_message_decrypter(
                self.suite
                    .derive_decrypter(&new_read_key),
            );
        Ok(())
    }
}
impl State<ClientConnectionData> for ExpectTraffic {
    // Post-handshake dispatch: application data is surfaced to the caller;
    // session tickets and key updates are handled internally; anything
    // else is an error. The state never changes from here.
    fn handle(mut self: Box<Self>, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError {
        match m.payload {
            MessagePayload::ApplicationData(payload) => cx
                .common
                .take_received_plaintext(payload),
            MessagePayload::Handshake(payload) => match payload.payload {
                HandshakePayload::NewSessionTicketTLS13(new_ticket) => {
                    self.handle_new_ticket_tls13(cx, &new_ticket)?
                }
                HandshakePayload::KeyUpdate(key_update) => {
                    self.handle_key_update(cx.common, &key_update)?
                }
                _ => {
                    return Err(inappropriate_handshake_message(
                        &payload,
                        &[HandshakeType::NewSessionTicket, HandshakeType::KeyUpdate],
                    ));
                }
            },
            _ => {
                return Err(inappropriate_message(
                    &m,
                    &[ContentType::ApplicationData, ContentType::Handshake],
                ));
            }
        }
        Ok(self)
    }
    fn export_keying_material(
        &self,
        output: &mut [u8],
        label: &[u8],
        context: Option<&[u8]>,
    ) -> Result<(), Error> {
        self.key_schedule
            .export_keying_material(output, label, context)
    }
    // Flush a pending write-side key update: notify the peer first, then
    // switch our own write keys.
    fn perhaps_write_key_update(&mut self, common: &mut CommonState) {
        if self.want_write_key_update {
            self.want_write_key_update = false;
            common.send_msg_encrypt(Message::build_key_update_notify().into());
            let write_key = self
                .key_schedule
                .next_client_application_traffic_secret();
            common
                .record_layer
                .set_message_encrypter(self.suite.derive_encrypter(&write_key));
        }
    }
}
// QUIC post-handshake state: the only TLS message expected here is
// NewSessionTicket; everything else flows over QUIC itself.
#[cfg(feature = "quic")]
struct ExpectQuicTraffic(ExpectTraffic);
#[cfg(feature = "quic")]
impl State<ClientConnectionData> for ExpectQuicTraffic {
    fn handle(mut self: Box<Self>, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError {
        let nst = require_handshake_msg!(
            m,
            HandshakeType::NewSessionTicket,
            HandshakePayload::NewSessionTicketTLS13
        )?;
        // Delegate ticket persistence to the wrapped ExpectTraffic.
        self.0
            .handle_new_ticket_tls13(cx, nst)?;
        Ok(self)
    }
    fn export_keying_material(
        &self,
        output: &mut [u8],
        label: &[u8],
        context: Option<&[u8]>,
    ) -> Result<(), Error> {
        self.0
            .export_keying_material(output, label, context)
    }
}
| 34.413025 | 100 | 0.611554 |
ac93d274735114fe3ff6acb83a82748e74849113 | 114 | fn main() {
    // Demonstrates a unit-like struct: no fields and no braces, just a name.
    // ANCHOR: here
    struct AlwaysEqual;
    let subject = AlwaysEqual;
    // ANCHOR_END: here
}
| 14.25 | 30 | 0.596491 |
22a7f4f476c9b15905f51d653a51d465302cbf12 | 1,025 | #[doc = "Reader of register TXDATA"]
pub type R = crate::R<u32, super::TXDATA>;
#[doc = "Writer for register TXDATA"]
pub type W = crate::W<u32, super::TXDATA>;
#[doc = "Register TXDATA `reset()`'s with value 0"]
impl crate::ResetValue for super::TXDATA {
    type Type = u32;
    #[inline(always)]
    // Hardware reset value of the register: all bits zero.
    fn reset_value() -> Self::Type { 0 }
}
#[doc = "Reader of field `TXDATA`"]
pub type TXDATA_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `TXDATA`"]
pub struct TXDATA_W<'a> {
    // Mutable borrow of the register writer; `bits` returns it to the
    // caller so writes can be chained.
    w: &'a mut W,
}
impl<'a> TXDATA_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Clear the low byte of the register image, then merge in the new
        // field value.
        let cleared = self.w.bits & !0xff;
        self.w.bits = cleared | ((value as u32) & 0xff);
        self.w
    }
}
impl R {
    #[doc = "Bits 0:7 - TX Data"]
    #[inline(always)]
    pub fn txdata(&self) -> TXDATA_R {
        // The TXDATA field occupies the low byte of the register.
        let field = (self.bits & 0xff) as u8;
        TXDATA_R::new(field)
    }
}
impl W {
    #[doc = "Bits 0:7 - TX Data"]
    #[inline(always)]
    pub fn txdata(&mut self) -> TXDATA_W {
        // Hand back a write proxy borrowing this writer.
        TXDATA_W { w: self }
    }
}
| 29.285714 | 80 | 0.579512 |
bbf90935a7fd07b7aadaf94ba7ef394f50261523 | 81,344 | #![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
// Generated (AutoRust) serde models for Application Gateway. Each resource
// follows one pattern: a flattened base (`Resource`/`SubResource`), an
// optional `properties` payload, and optional `name`/`etag` strings; the
// `pub mod …_properties_format` modules hold string-valued enums used by
// the matching properties struct.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AddressSpace {
    #[serde(rename = "addressPrefixes", default, skip_serializing_if = "Vec::is_empty")]
    pub address_prefixes: Vec<String>,
}
/// Top-level application gateway resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGateway {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ApplicationGatewayPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// A backend address, given either as an FQDN or an IP address.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayBackendAddress {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub fqdn: Option<String>,
    #[serde(rename = "ipAddress", default, skip_serializing_if = "Option::is_none")]
    pub ip_address: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayBackendAddressPool {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ApplicationGatewayBackendAddressPoolPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayBackendAddressPoolPropertiesFormat {
    #[serde(rename = "backendIPConfigurations", default, skip_serializing_if = "Vec::is_empty")]
    pub backend_ip_configurations: Vec<NetworkInterfaceIpConfiguration>,
    #[serde(rename = "backendAddresses", default, skip_serializing_if = "Vec::is_empty")]
    pub backend_addresses: Vec<ApplicationGatewayBackendAddress>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayBackendHttpSettings {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ApplicationGatewayBackendHttpSettingsPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayBackendHttpSettingsPropertiesFormat {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub port: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub protocol: Option<application_gateway_backend_http_settings_properties_format::Protocol>,
    #[serde(rename = "cookieBasedAffinity", default, skip_serializing_if = "Option::is_none")]
    pub cookie_based_affinity: Option<application_gateway_backend_http_settings_properties_format::CookieBasedAffinity>,
    #[serde(rename = "requestTimeout", default, skip_serializing_if = "Option::is_none")]
    pub request_timeout: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub probe: Option<SubResource>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
pub mod application_gateway_backend_http_settings_properties_format {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Protocol {
        Http,
        Https,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum CookieBasedAffinity {
        Enabled,
        Disabled,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayFrontendIpConfiguration {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ApplicationGatewayFrontendIpConfigurationPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayFrontendIpConfigurationPropertiesFormat {
    #[serde(rename = "privateIPAddress", default, skip_serializing_if = "Option::is_none")]
    pub private_ip_address: Option<String>,
    #[serde(rename = "privateIPAllocationMethod", default, skip_serializing_if = "Option::is_none")]
    pub private_ip_allocation_method: Option<application_gateway_frontend_ip_configuration_properties_format::PrivateIpAllocationMethod>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub subnet: Option<SubResource>,
    #[serde(rename = "publicIPAddress", default, skip_serializing_if = "Option::is_none")]
    pub public_ip_address: Option<SubResource>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
pub mod application_gateway_frontend_ip_configuration_properties_format {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum PrivateIpAllocationMethod {
        Static,
        Dynamic,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayFrontendPort {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ApplicationGatewayFrontendPortPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayFrontendPortPropertiesFormat {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub port: Option<i32>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayHttpListener {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ApplicationGatewayHttpListenerPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayHttpListenerPropertiesFormat {
    #[serde(rename = "frontendIPConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub frontend_ip_configuration: Option<SubResource>,
    #[serde(rename = "frontendPort", default, skip_serializing_if = "Option::is_none")]
    pub frontend_port: Option<SubResource>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub protocol: Option<application_gateway_http_listener_properties_format::Protocol>,
    #[serde(rename = "hostName", default, skip_serializing_if = "Option::is_none")]
    pub host_name: Option<String>,
    #[serde(rename = "sslCertificate", default, skip_serializing_if = "Option::is_none")]
    pub ssl_certificate: Option<SubResource>,
    #[serde(rename = "requireServerNameIndication", default, skip_serializing_if = "Option::is_none")]
    pub require_server_name_indication: Option<bool>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
pub mod application_gateway_http_listener_properties_format {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Protocol {
        Http,
        Https,
    }
}
// More generated Application Gateway models (same AutoRust pattern as the
// structs above: flattened `SubResource`, optional `properties`/`name`/`etag`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayIpConfiguration {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ApplicationGatewayIpConfigurationPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayIpConfigurationPropertiesFormat {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub subnet: Option<SubResource>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Paged list response: a page of gateways plus an optional continuation link.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ApplicationGateway>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayPathRule {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ApplicationGatewayPathRulePropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayPathRulePropertiesFormat {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub paths: Vec<String>,
    #[serde(rename = "backendAddressPool", default, skip_serializing_if = "Option::is_none")]
    pub backend_address_pool: Option<SubResource>,
    #[serde(rename = "backendHttpSettings", default, skip_serializing_if = "Option::is_none")]
    pub backend_http_settings: Option<SubResource>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayProbe {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ApplicationGatewayProbePropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayProbePropertiesFormat {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub protocol: Option<application_gateway_probe_properties_format::Protocol>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub host: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub path: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub interval: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub timeout: Option<i32>,
    #[serde(rename = "unhealthyThreshold", default, skip_serializing_if = "Option::is_none")]
    pub unhealthy_threshold: Option<i32>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
pub mod application_gateway_probe_properties_format {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Protocol {
        Http,
        Https,
    }
}
/// The main properties bag referenced by `ApplicationGateway::properties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayPropertiesFormat {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<ApplicationGatewaySku>,
    #[serde(rename = "operationalState", default, skip_serializing_if = "Option::is_none")]
    pub operational_state: Option<application_gateway_properties_format::OperationalState>,
    #[serde(rename = "gatewayIPConfigurations", default, skip_serializing_if = "Vec::is_empty")]
    pub gateway_ip_configurations: Vec<ApplicationGatewayIpConfiguration>,
    #[serde(rename = "sslCertificates", default, skip_serializing_if = "Vec::is_empty")]
    pub ssl_certificates: Vec<ApplicationGatewaySslCertificate>,
    #[serde(rename = "frontendIPConfigurations", default, skip_serializing_if = "Vec::is_empty")]
    pub frontend_ip_configurations: Vec<ApplicationGatewayFrontendIpConfiguration>,
    #[serde(rename = "frontendPorts", default, skip_serializing_if = "Vec::is_empty")]
    pub frontend_ports: Vec<ApplicationGatewayFrontendPort>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub probes: Vec<ApplicationGatewayProbe>,
    #[serde(rename = "backendAddressPools", default, skip_serializing_if = "Vec::is_empty")]
    pub backend_address_pools: Vec<ApplicationGatewayBackendAddressPool>,
    #[serde(rename = "backendHttpSettingsCollection", default, skip_serializing_if = "Vec::is_empty")]
    pub backend_http_settings_collection: Vec<ApplicationGatewayBackendHttpSettings>,
    #[serde(rename = "httpListeners", default, skip_serializing_if = "Vec::is_empty")]
    pub http_listeners: Vec<ApplicationGatewayHttpListener>,
    #[serde(rename = "urlPathMaps", default, skip_serializing_if = "Vec::is_empty")]
    pub url_path_maps: Vec<ApplicationGatewayUrlPathMap>,
    #[serde(rename = "requestRoutingRules", default, skip_serializing_if = "Vec::is_empty")]
    pub request_routing_rules: Vec<ApplicationGatewayRequestRoutingRule>,
    #[serde(rename = "resourceGuid", default, skip_serializing_if = "Option::is_none")]
    pub resource_guid: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
pub mod application_gateway_properties_format {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum OperationalState {
        Stopped,
        Starting,
        Running,
        Stopping,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayRequestRoutingRule {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ApplicationGatewayRequestRoutingRulePropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayRequestRoutingRulePropertiesFormat {
    #[serde(rename = "ruleType", default, skip_serializing_if = "Option::is_none")]
    pub rule_type: Option<application_gateway_request_routing_rule_properties_format::RuleType>,
    #[serde(rename = "backendAddressPool", default, skip_serializing_if = "Option::is_none")]
    pub backend_address_pool: Option<SubResource>,
    #[serde(rename = "backendHttpSettings", default, skip_serializing_if = "Option::is_none")]
    pub backend_http_settings: Option<SubResource>,
    #[serde(rename = "httpListener", default, skip_serializing_if = "Option::is_none")]
    pub http_listener: Option<SubResource>,
    #[serde(rename = "urlPathMap", default, skip_serializing_if = "Option::is_none")]
    pub url_path_map: Option<SubResource>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
pub mod application_gateway_request_routing_rule_properties_format {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum RuleType {
        Basic,
        PathBasedRouting,
    }
}
/// SKU (size/tier/instance-count) of an application gateway.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewaySku {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<application_gateway_sku::Name>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tier: Option<application_gateway_sku::Tier>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub capacity: Option<i32>,
}
/// Enums for `ApplicationGatewaySku`.
pub mod application_gateway_sku {
    use super::*;
    /// SKU name; wire values use an underscore form (e.g. "Standard_Small"),
    /// mapped via `serde(rename)`.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Name {
        #[serde(rename = "Standard_Small")]
        StandardSmall,
        #[serde(rename = "Standard_Medium")]
        StandardMedium,
        #[serde(rename = "Standard_Large")]
        StandardLarge,
    }
    /// SKU tier; only `Standard` exists in this API version.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Tier {
        Standard,
    }
}
/// SSL certificate sub-resource of an application gateway.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewaySslCertificate {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ApplicationGatewaySslCertificatePropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// Properties of an application gateway SSL certificate.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewaySslCertificatePropertiesFormat {
    // NOTE(review): `data`/`password` presumably carry the certificate blob and
    // its passphrase on upload — confirm against the REST API spec.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub data: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<String>,
    #[serde(rename = "publicCertData", default, skip_serializing_if = "Option::is_none")]
    pub public_cert_data: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// URL path map sub-resource of an application gateway.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayUrlPathMap {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ApplicationGatewayUrlPathMapPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// Properties of a URL path map: default pool/settings references plus the
/// list of path rules (empty `Vec` when the wire field is absent).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationGatewayUrlPathMapPropertiesFormat {
    #[serde(rename = "defaultBackendAddressPool", default, skip_serializing_if = "Option::is_none")]
    pub default_backend_address_pool: Option<SubResource>,
    #[serde(rename = "defaultBackendHttpSettings", default, skip_serializing_if = "Option::is_none")]
    pub default_backend_http_settings: Option<SubResource>,
    #[serde(rename = "pathRules", default, skip_serializing_if = "Vec::is_empty")]
    pub path_rules: Vec<ApplicationGatewayPathRule>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Paged list of `ExpressRouteCircuitAuthorization`; `nextLink` points to the
/// next page when present (standard ARM list-result shape).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AuthorizationListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ExpressRouteCircuitAuthorization>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Properties of an ExpressRoute circuit authorization.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AuthorizationPropertiesFormat {
    #[serde(rename = "authorizationKey", default, skip_serializing_if = "Option::is_none")]
    pub authorization_key: Option<String>,
    #[serde(rename = "authorizationUseStatus", default, skip_serializing_if = "Option::is_none")]
    pub authorization_use_status: Option<authorization_properties_format::AuthorizationUseStatus>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Enums for `AuthorizationPropertiesFormat`.
pub mod authorization_properties_format {
    use super::*;
    /// Whether the authorization key has been consumed.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AuthorizationUseStatus {
        Available,
        InUse,
    }
}
/// Outcome of a long-running (async) operation: terminal/ongoing status plus
/// an `Error` payload on failure.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureAsyncOperationResult {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<azure_async_operation_result::Status>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<Error>,
}
/// Enums for `AzureAsyncOperationResult`.
pub mod azure_async_operation_result {
    use super::*;
    /// Async operation status.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        InProgress,
        Succeeded,
        Failed,
    }
}
/// Load balancer backend address pool sub-resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BackendAddressPool {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<BackendAddressPoolPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// Properties of a backend address pool. `backendIPConfigurations` is inline
/// NIC IP-configuration objects; the rule references are id-only links.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BackendAddressPoolPropertiesFormat {
    #[serde(rename = "backendIPConfigurations", default, skip_serializing_if = "Vec::is_empty")]
    pub backend_ip_configurations: Vec<NetworkInterfaceIpConfiguration>,
    #[serde(rename = "loadBalancingRules", default, skip_serializing_if = "Vec::is_empty")]
    pub load_balancing_rules: Vec<SubResource>,
    #[serde(rename = "outboundNatRule", default, skip_serializing_if = "Option::is_none")]
    pub outbound_nat_rule: Option<SubResource>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// BGP configuration of a (local/virtual network) gateway.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BgpSettings {
    // ASN is i64 on the wire here while peer_weight is i32 — widths come from
    // the upstream API schema, do not "normalize" them.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub asn: Option<i64>,
    #[serde(rename = "bgpPeeringAddress", default, skip_serializing_if = "Option::is_none")]
    pub bgp_peering_address: Option<String>,
    #[serde(rename = "peerWeight", default, skip_serializing_if = "Option::is_none")]
    pub peer_weight: Option<i32>,
}
/// Request body for resetting a VPN connection shared key (desired key length).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConnectionResetSharedKey {
    #[serde(rename = "keyLength", default, skip_serializing_if = "Option::is_none")]
    pub key_length: Option<i64>,
}
/// VPN connection shared key value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConnectionSharedKey {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
}
/// Response wrapper carrying a shared key value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConnectionSharedKeyResult {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
}
/// DHCP options of a virtual network (DNS server list).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DhcpOptions {
    #[serde(rename = "dnsServers", default, skip_serializing_if = "Vec::is_empty")]
    pub dns_servers: Vec<String>,
}
/// Result of a DNS name availability check.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DnsNameAvailabilityResult {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub available: Option<bool>,
}
/// Service error payload: machine `code`, human-readable `message`, the
/// offending `target`, nested `details`, and an opaque `innerError` string.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Error {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub target: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub details: Vec<ErrorDetails>,
    #[serde(rename = "innerError", default, skip_serializing_if = "Option::is_none")]
    pub inner_error: Option<String>,
}
/// Single entry of `Error::details`. Field order here is code/target/message,
/// unlike `Error` (code/message/target) — order mirrors the wire schema and is
/// irrelevant to serde, which matches by name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorDetails {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub target: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
/// Top-level ExpressRoute circuit ARM resource (flattens the tracked-resource
/// envelope `Resource`, unlike sub-resources which flatten `SubResource`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteCircuit {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<ExpressRouteCircuitSku>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ExpressRouteCircuitPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// One ARP table entry (IP/MAC pair) reported for a circuit.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteCircuitArpTable {
    #[serde(rename = "ipAddress", default, skip_serializing_if = "Option::is_none")]
    pub ip_address: Option<String>,
    #[serde(rename = "macAddress", default, skip_serializing_if = "Option::is_none")]
    pub mac_address: Option<String>,
}
/// Circuit authorization sub-resource; typed payload in
/// `AuthorizationPropertiesFormat`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteCircuitAuthorization {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<AuthorizationPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// Paged list of `ExpressRouteCircuit`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteCircuitListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ExpressRouteCircuit>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Peering sub-resource of an ExpressRoute circuit.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteCircuitPeering {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ExpressRouteCircuitPeeringPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// Microsoft-peering configuration of a circuit peering.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteCircuitPeeringConfig {
    #[serde(rename = "advertisedPublicPrefixes", default, skip_serializing_if = "Vec::is_empty")]
    pub advertised_public_prefixes: Vec<String>,
    #[serde(rename = "advertisedPublicPrefixesState", default, skip_serializing_if = "Option::is_none")]
    pub advertised_public_prefixes_state: Option<express_route_circuit_peering_config::AdvertisedPublicPrefixesState>,
    // Wire name is "customerASN" (upper-case acronym); Rust side is snake_case.
    #[serde(rename = "customerASN", default, skip_serializing_if = "Option::is_none")]
    pub customer_asn: Option<i32>,
    #[serde(rename = "routingRegistryName", default, skip_serializing_if = "Option::is_none")]
    pub routing_registry_name: Option<String>,
}
/// Enums for `ExpressRouteCircuitPeeringConfig`.
pub mod express_route_circuit_peering_config {
    use super::*;
    /// Validation state of the advertised public prefixes.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AdvertisedPublicPrefixesState {
        NotConfigured,
        Configuring,
        Configured,
        ValidationNeeded,
    }
}
/// Paged list of `ExpressRouteCircuitPeering`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteCircuitPeeringListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ExpressRouteCircuitPeering>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Properties of an ExpressRoute circuit peering: peering type/state, ASNs,
/// primary/secondary address prefixes and ports, VLAN, optional Microsoft
/// peering config and traffic stats.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteCircuitPeeringPropertiesFormat {
    #[serde(rename = "peeringType", default, skip_serializing_if = "Option::is_none")]
    pub peering_type: Option<express_route_circuit_peering_properties_format::PeeringType>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<express_route_circuit_peering_properties_format::State>,
    #[serde(rename = "azureASN", default, skip_serializing_if = "Option::is_none")]
    pub azure_asn: Option<i32>,
    #[serde(rename = "peerASN", default, skip_serializing_if = "Option::is_none")]
    pub peer_asn: Option<i32>,
    #[serde(rename = "primaryPeerAddressPrefix", default, skip_serializing_if = "Option::is_none")]
    pub primary_peer_address_prefix: Option<String>,
    #[serde(rename = "secondaryPeerAddressPrefix", default, skip_serializing_if = "Option::is_none")]
    pub secondary_peer_address_prefix: Option<String>,
    #[serde(rename = "primaryAzurePort", default, skip_serializing_if = "Option::is_none")]
    pub primary_azure_port: Option<String>,
    #[serde(rename = "secondaryAzurePort", default, skip_serializing_if = "Option::is_none")]
    pub secondary_azure_port: Option<String>,
    #[serde(rename = "sharedKey", default, skip_serializing_if = "Option::is_none")]
    pub shared_key: Option<String>,
    #[serde(rename = "vlanId", default, skip_serializing_if = "Option::is_none")]
    pub vlan_id: Option<i32>,
    #[serde(rename = "microsoftPeeringConfig", default, skip_serializing_if = "Option::is_none")]
    pub microsoft_peering_config: Option<ExpressRouteCircuitPeeringConfig>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub stats: Option<ExpressRouteCircuitStats>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Enums for `ExpressRouteCircuitPeeringPropertiesFormat`.
pub mod express_route_circuit_peering_properties_format {
    use super::*;
    /// Kind of peering on the circuit.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum PeeringType {
        AzurePublicPeering,
        AzurePrivatePeering,
        MicrosoftPeering,
    }
    /// Whether the peering is enabled.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        Disabled,
        Enabled,
    }
}
/// Properties of an ExpressRoute circuit, including its authorizations,
/// peerings, service key and service-provider data.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteCircuitPropertiesFormat {
    // Circuit-level provisioning state is a free-form string, while the
    // service-provider side below is a typed enum — both come from the schema.
    #[serde(rename = "circuitProvisioningState", default, skip_serializing_if = "Option::is_none")]
    pub circuit_provisioning_state: Option<String>,
    #[serde(rename = "serviceProviderProvisioningState", default, skip_serializing_if = "Option::is_none")]
    pub service_provider_provisioning_state: Option<express_route_circuit_properties_format::ServiceProviderProvisioningState>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub authorizations: Vec<ExpressRouteCircuitAuthorization>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub peerings: Vec<ExpressRouteCircuitPeering>,
    #[serde(rename = "serviceKey", default, skip_serializing_if = "Option::is_none")]
    pub service_key: Option<String>,
    #[serde(rename = "serviceProviderNotes", default, skip_serializing_if = "Option::is_none")]
    pub service_provider_notes: Option<String>,
    #[serde(rename = "serviceProviderProperties", default, skip_serializing_if = "Option::is_none")]
    pub service_provider_properties: Option<ExpressRouteCircuitServiceProviderProperties>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Enums for `ExpressRouteCircuitPropertiesFormat`.
pub mod express_route_circuit_properties_format {
    use super::*;
    /// Provisioning state on the connectivity provider's side.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ServiceProviderProvisioningState {
        NotProvisioned,
        Provisioning,
        Provisioned,
        Deprovisioning,
    }
}
/// One routes-table entry reported for a circuit. `nextHopType` is the only
/// non-optional field in this chunk's table models (required by the schema:
/// no `default`/`skip_serializing_if` on it).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteCircuitRoutesTable {
    #[serde(rename = "addressPrefix", default, skip_serializing_if = "Option::is_none")]
    pub address_prefix: Option<String>,
    #[serde(rename = "nextHopType")]
    pub next_hop_type: express_route_circuit_routes_table::NextHopType,
    #[serde(rename = "nextHopIP", default, skip_serializing_if = "Option::is_none")]
    pub next_hop_ip: Option<String>,
    #[serde(rename = "asPath", default, skip_serializing_if = "Option::is_none")]
    pub as_path: Option<String>,
}
/// Enums for `ExpressRouteCircuitRoutesTable`.
pub mod express_route_circuit_routes_table {
    use super::*;
    /// Next-hop kind for a route entry; `None` is a legitimate wire value,
    /// not Rust's `Option::None`.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum NextHopType {
        VirtualNetworkGateway,
        VnetLocal,
        Internet,
        VirtualAppliance,
        None,
    }
}
/// Service-provider facts of a circuit: provider name, peering location and
/// ordered bandwidth.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteCircuitServiceProviderProperties {
    #[serde(rename = "serviceProviderName", default, skip_serializing_if = "Option::is_none")]
    pub service_provider_name: Option<String>,
    #[serde(rename = "peeringLocation", default, skip_serializing_if = "Option::is_none")]
    pub peering_location: Option<String>,
    #[serde(rename = "bandwidthInMbps", default, skip_serializing_if = "Option::is_none")]
    pub bandwidth_in_mbps: Option<i32>,
}
/// SKU of an ExpressRoute circuit (free-form name, typed tier/family).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteCircuitSku {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tier: Option<express_route_circuit_sku::Tier>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub family: Option<express_route_circuit_sku::Family>,
}
/// Enums for `ExpressRouteCircuitSku`.
pub mod express_route_circuit_sku {
    use super::*;
    /// Circuit tier.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Tier {
        Standard,
        Premium,
    }
    /// Billing family.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Family {
        UnlimitedData,
        MeteredData,
    }
}
/// Byte counters for a circuit peering.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteCircuitStats {
    // i32 counters per this API version's schema; may wrap for large volumes.
    #[serde(rename = "bytesIn", default, skip_serializing_if = "Option::is_none")]
    pub bytes_in: Option<i32>,
    #[serde(rename = "bytesOut", default, skip_serializing_if = "Option::is_none")]
    pub bytes_out: Option<i32>,
}
/// Paged list of ARP table entries.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteCircuitsArpTableListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ExpressRouteCircuitArpTable>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Paged list of routes-table entries.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteCircuitsRoutesTableListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ExpressRouteCircuitRoutesTable>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Paged list of circuit stats.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteCircuitsStatsListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ExpressRouteCircuitStats>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// ExpressRoute connectivity provider (top-level ARM resource; no `etag` or
/// `sku`, unlike `ExpressRouteCircuit`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteServiceProvider {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ExpressRouteServiceProviderPropertiesFormat>,
}
/// One bandwidth offer of a provider (display name + value in Mbps).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteServiceProviderBandwidthsOffered {
    #[serde(rename = "offerName", default, skip_serializing_if = "Option::is_none")]
    pub offer_name: Option<String>,
    #[serde(rename = "valueInMbps", default, skip_serializing_if = "Option::is_none")]
    pub value_in_mbps: Option<i32>,
}
/// Paged list of `ExpressRouteServiceProvider`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteServiceProviderListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ExpressRouteServiceProvider>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Properties of an ExpressRoute service provider.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressRouteServiceProviderPropertiesFormat {
    #[serde(rename = "peeringLocations", default, skip_serializing_if = "Vec::is_empty")]
    pub peering_locations: Vec<String>,
    #[serde(rename = "bandwidthsOffered", default, skip_serializing_if = "Vec::is_empty")]
    pub bandwidths_offered: Vec<ExpressRouteServiceProviderBandwidthsOffered>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Frontend IP configuration sub-resource of a load balancer.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FrontendIpConfiguration {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<FrontendIpConfigurationPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// Properties of a frontend IP configuration: back-references to the rules
/// that use it (id-only links) plus the private/public IP assignment.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FrontendIpConfigurationPropertiesFormat {
    #[serde(rename = "inboundNatRules", default, skip_serializing_if = "Vec::is_empty")]
    pub inbound_nat_rules: Vec<SubResource>,
    #[serde(rename = "inboundNatPools", default, skip_serializing_if = "Vec::is_empty")]
    pub inbound_nat_pools: Vec<SubResource>,
    #[serde(rename = "outboundNatRules", default, skip_serializing_if = "Vec::is_empty")]
    pub outbound_nat_rules: Vec<SubResource>,
    #[serde(rename = "loadBalancingRules", default, skip_serializing_if = "Vec::is_empty")]
    pub load_balancing_rules: Vec<SubResource>,
    #[serde(rename = "privateIPAddress", default, skip_serializing_if = "Option::is_none")]
    pub private_ip_address: Option<String>,
    #[serde(rename = "privateIPAllocationMethod", default, skip_serializing_if = "Option::is_none")]
    pub private_ip_allocation_method: Option<frontend_ip_configuration_properties_format::PrivateIpAllocationMethod>,
    // Subnet/public IP are inline objects here, not SubResource links.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub subnet: Option<Subnet>,
    #[serde(rename = "publicIPAddress", default, skip_serializing_if = "Option::is_none")]
    pub public_ip_address: Option<PublicIpAddress>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Enums for `FrontendIpConfigurationPropertiesFormat`.
pub mod frontend_ip_configuration_properties_format {
    use super::*;
    /// How the private IP is allocated.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum PrivateIpAllocationMethod {
        Static,
        Dynamic,
    }
}
/// Generic IP configuration sub-resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IpConfiguration {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<IpConfigurationPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// Properties of an IP configuration (subset of the frontend variant: no
/// rule back-references).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IpConfigurationPropertiesFormat {
    #[serde(rename = "privateIPAddress", default, skip_serializing_if = "Option::is_none")]
    pub private_ip_address: Option<String>,
    #[serde(rename = "privateIPAllocationMethod", default, skip_serializing_if = "Option::is_none")]
    pub private_ip_allocation_method: Option<ip_configuration_properties_format::PrivateIpAllocationMethod>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub subnet: Option<Subnet>,
    #[serde(rename = "publicIPAddress", default, skip_serializing_if = "Option::is_none")]
    pub public_ip_address: Option<PublicIpAddress>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Enums for `IpConfigurationPropertiesFormat`.
pub mod ip_configuration_properties_format {
    use super::*;
    /// How the private IP is allocated.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum PrivateIpAllocationMethod {
        Static,
        Dynamic,
    }
}
/// Inbound NAT pool sub-resource of a load balancer.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InboundNatPool {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<InboundNatPoolPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// Properties of an inbound NAT pool. Protocol, port range and backend port
/// are required by the schema (no `default`/`skip_serializing_if`), unlike
/// the mostly-optional fields of `InboundNatRulePropertiesFormat`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InboundNatPoolPropertiesFormat {
    #[serde(rename = "frontendIPConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub frontend_ip_configuration: Option<SubResource>,
    pub protocol: inbound_nat_pool_properties_format::Protocol,
    #[serde(rename = "frontendPortRangeStart")]
    pub frontend_port_range_start: i32,
    #[serde(rename = "frontendPortRangeEnd")]
    pub frontend_port_range_end: i32,
    #[serde(rename = "backendPort")]
    pub backend_port: i32,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Enums for `InboundNatPoolPropertiesFormat`.
pub mod inbound_nat_pool_properties_format {
    use super::*;
    /// Transport protocol for the NAT pool.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Protocol {
        Udp,
        Tcp,
    }
}
/// Inbound NAT rule sub-resource of a load balancer.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InboundNatRule {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<InboundNatRulePropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// Properties of an inbound NAT rule. The frontend reference is an id-only
/// link; the backend reference is an inline NIC IP-configuration object.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InboundNatRulePropertiesFormat {
    #[serde(rename = "frontendIPConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub frontend_ip_configuration: Option<SubResource>,
    #[serde(rename = "backendIPConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub backend_ip_configuration: Option<NetworkInterfaceIpConfiguration>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub protocol: Option<inbound_nat_rule_properties_format::Protocol>,
    #[serde(rename = "frontendPort", default, skip_serializing_if = "Option::is_none")]
    pub frontend_port: Option<i32>,
    #[serde(rename = "backendPort", default, skip_serializing_if = "Option::is_none")]
    pub backend_port: Option<i32>,
    #[serde(rename = "idleTimeoutInMinutes", default, skip_serializing_if = "Option::is_none")]
    pub idle_timeout_in_minutes: Option<i32>,
    #[serde(rename = "enableFloatingIP", default, skip_serializing_if = "Option::is_none")]
    pub enable_floating_ip: Option<bool>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Enums for `InboundNatRulePropertiesFormat`.
pub mod inbound_nat_rule_properties_format {
    use super::*;
    /// Transport protocol for the NAT rule.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Protocol {
        Udp,
        Tcp,
    }
}
/// Load balancer (top-level ARM resource).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LoadBalancer {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<LoadBalancerPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// Paged list of `LoadBalancer`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LoadBalancerListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<LoadBalancer>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Properties of a load balancer: all of its sub-resource collections held
/// inline (frontends, pools, rules, probes, NAT rules/pools).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LoadBalancerPropertiesFormat {
    #[serde(rename = "frontendIPConfigurations", default, skip_serializing_if = "Vec::is_empty")]
    pub frontend_ip_configurations: Vec<FrontendIpConfiguration>,
    #[serde(rename = "backendAddressPools", default, skip_serializing_if = "Vec::is_empty")]
    pub backend_address_pools: Vec<BackendAddressPool>,
    #[serde(rename = "loadBalancingRules", default, skip_serializing_if = "Vec::is_empty")]
    pub load_balancing_rules: Vec<LoadBalancingRule>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub probes: Vec<Probe>,
    #[serde(rename = "inboundNatRules", default, skip_serializing_if = "Vec::is_empty")]
    pub inbound_nat_rules: Vec<InboundNatRule>,
    #[serde(rename = "inboundNatPools", default, skip_serializing_if = "Vec::is_empty")]
    pub inbound_nat_pools: Vec<InboundNatPool>,
    #[serde(rename = "outboundNatRules", default, skip_serializing_if = "Vec::is_empty")]
    pub outbound_nat_rules: Vec<OutboundNatRule>,
    #[serde(rename = "resourceGuid", default, skip_serializing_if = "Option::is_none")]
    pub resource_guid: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Load-balancing rule sub-resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LoadBalancingRule {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<LoadBalancingRulePropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// Properties of a load-balancing rule. `protocol` and `frontendPort` are
/// required by the schema; everything else is optional on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LoadBalancingRulePropertiesFormat {
    #[serde(rename = "frontendIPConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub frontend_ip_configuration: Option<SubResource>,
    #[serde(rename = "backendAddressPool", default, skip_serializing_if = "Option::is_none")]
    pub backend_address_pool: Option<SubResource>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub probe: Option<SubResource>,
    pub protocol: load_balancing_rule_properties_format::Protocol,
    #[serde(rename = "loadDistribution", default, skip_serializing_if = "Option::is_none")]
    pub load_distribution: Option<load_balancing_rule_properties_format::LoadDistribution>,
    #[serde(rename = "frontendPort")]
    pub frontend_port: i32,
    #[serde(rename = "backendPort", default, skip_serializing_if = "Option::is_none")]
    pub backend_port: Option<i32>,
    #[serde(rename = "idleTimeoutInMinutes", default, skip_serializing_if = "Option::is_none")]
    pub idle_timeout_in_minutes: Option<i32>,
    #[serde(rename = "enableFloatingIP", default, skip_serializing_if = "Option::is_none")]
    pub enable_floating_ip: Option<bool>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Enums for `LoadBalancingRulePropertiesFormat`.
pub mod load_balancing_rule_properties_format {
    use super::*;
    /// Transport protocol for the rule.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Protocol {
        Udp,
        Tcp,
    }
    /// Session-affinity mode; wire values keep the "IP" acronym casing, mapped
    /// via `serde(rename)`.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum LoadDistribution {
        Default,
        #[serde(rename = "SourceIP")]
        SourceIp,
        #[serde(rename = "SourceIPProtocol")]
        SourceIpProtocol,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LocalNetworkGateway {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<LocalNetworkGatewayPropertiesFormat>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub etag: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LocalNetworkGatewayListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<LocalNetworkGateway>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LocalNetworkGatewayPropertiesFormat {
#[serde(rename = "localNetworkAddressSpace", default, skip_serializing_if = "Option::is_none")]
pub local_network_address_space: Option<AddressSpace>,
#[serde(rename = "gatewayIpAddress", default, skip_serializing_if = "Option::is_none")]
pub gateway_ip_address: Option<String>,
#[serde(rename = "bgpSettings", default, skip_serializing_if = "Option::is_none")]
pub bgp_settings: Option<BgpSettings>,
#[serde(rename = "resourceGuid", default, skip_serializing_if = "Option::is_none")]
pub resource_guid: Option<String>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetworkInterface {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<NetworkInterfacePropertiesFormat>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub etag: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetworkInterfaceDnsSettings {
#[serde(rename = "dnsServers", default, skip_serializing_if = "Vec::is_empty")]
pub dns_servers: Vec<String>,
#[serde(rename = "appliedDnsServers", default, skip_serializing_if = "Vec::is_empty")]
pub applied_dns_servers: Vec<String>,
#[serde(rename = "internalDnsNameLabel", default, skip_serializing_if = "Option::is_none")]
pub internal_dns_name_label: Option<String>,
#[serde(rename = "internalFqdn", default, skip_serializing_if = "Option::is_none")]
pub internal_fqdn: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetworkInterfaceIpConfiguration {
#[serde(flatten)]
pub sub_resource: SubResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<NetworkInterfaceIpConfigurationPropertiesFormat>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub etag: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetworkInterfaceIpConfigurationPropertiesFormat {
#[serde(rename = "loadBalancerBackendAddressPools", default, skip_serializing_if = "Vec::is_empty")]
pub load_balancer_backend_address_pools: Vec<BackendAddressPool>,
#[serde(rename = "loadBalancerInboundNatRules", default, skip_serializing_if = "Vec::is_empty")]
pub load_balancer_inbound_nat_rules: Vec<InboundNatRule>,
#[serde(rename = "privateIPAddress", default, skip_serializing_if = "Option::is_none")]
pub private_ip_address: Option<String>,
#[serde(rename = "privateIPAllocationMethod", default, skip_serializing_if = "Option::is_none")]
pub private_ip_allocation_method: Option<network_interface_ip_configuration_properties_format::PrivateIpAllocationMethod>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub subnet: Option<Subnet>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub primary: Option<bool>,
#[serde(rename = "publicIPAddress", default, skip_serializing_if = "Option::is_none")]
pub public_ip_address: Option<PublicIpAddress>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
}
pub mod network_interface_ip_configuration_properties_format {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateIpAllocationMethod {
Static,
Dynamic,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetworkInterfaceListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<NetworkInterface>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetworkInterfacePropertiesFormat {
#[serde(rename = "virtualMachine", default, skip_serializing_if = "Option::is_none")]
pub virtual_machine: Option<SubResource>,
#[serde(rename = "networkSecurityGroup", default, skip_serializing_if = "Option::is_none")]
pub network_security_group: Option<NetworkSecurityGroup>,
#[serde(rename = "ipConfigurations", default, skip_serializing_if = "Vec::is_empty")]
pub ip_configurations: Vec<NetworkInterfaceIpConfiguration>,
#[serde(rename = "dnsSettings", default, skip_serializing_if = "Option::is_none")]
pub dns_settings: Option<NetworkInterfaceDnsSettings>,
#[serde(rename = "macAddress", default, skip_serializing_if = "Option::is_none")]
pub mac_address: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub primary: Option<bool>,
#[serde(rename = "enableIPForwarding", default, skip_serializing_if = "Option::is_none")]
pub enable_ip_forwarding: Option<bool>,
#[serde(rename = "resourceGuid", default, skip_serializing_if = "Option::is_none")]
pub resource_guid: Option<String>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
}
/// A network security group (NSG) resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetworkSecurityGroup {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<NetworkSecurityGroupPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// One page of network security groups.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetworkSecurityGroupListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<NetworkSecurityGroup>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Rules defined on an NSG plus the interfaces/subnets it is attached to.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetworkSecurityGroupPropertiesFormat {
    #[serde(rename = "securityRules", default, skip_serializing_if = "Vec::is_empty")]
    pub security_rules: Vec<SecurityRule>,
    #[serde(rename = "defaultSecurityRules", default, skip_serializing_if = "Vec::is_empty")]
    pub default_security_rules: Vec<SecurityRule>,
    #[serde(rename = "networkInterfaces", default, skip_serializing_if = "Vec::is_empty")]
    pub network_interfaces: Vec<NetworkInterface>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub subnets: Vec<Subnet>,
    #[serde(rename = "resourceGuid", default, skip_serializing_if = "Option::is_none")]
    pub resource_guid: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// An outbound NAT rule on a load balancer.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OutboundNatRule {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<OutboundNatRulePropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// Settings of an outbound NAT rule; `backendAddressPool` is mandatory on
/// the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OutboundNatRulePropertiesFormat {
    #[serde(rename = "allocatedOutboundPorts", default, skip_serializing_if = "Option::is_none")]
    pub allocated_outbound_ports: Option<i32>,
    #[serde(rename = "frontendIPConfigurations", default, skip_serializing_if = "Vec::is_empty")]
    pub frontend_ip_configurations: Vec<SubResource>,
    #[serde(rename = "backendAddressPool")]
    pub backend_address_pool: SubResource,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// A health probe attached to a load balancer.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Probe {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ProbePropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// Probe settings; `protocol` and `port` are mandatory on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProbePropertiesFormat {
    #[serde(rename = "loadBalancingRules", default, skip_serializing_if = "Vec::is_empty")]
    pub load_balancing_rules: Vec<SubResource>,
    pub protocol: probe_properties_format::Protocol,
    pub port: i32,
    #[serde(rename = "intervalInSeconds", default, skip_serializing_if = "Option::is_none")]
    pub interval_in_seconds: Option<i32>,
    #[serde(rename = "numberOfProbes", default, skip_serializing_if = "Option::is_none")]
    pub number_of_probes: Option<i32>,
    #[serde(rename = "requestPath", default, skip_serializing_if = "Option::is_none")]
    pub request_path: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Closed value sets used by [`ProbePropertiesFormat`].
pub mod probe_properties_format {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Protocol {
        Http,
        Tcp,
    }
}
/// A public IP address resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PublicIpAddress {
    #[serde(flatten)]
    pub resource: Resource,
    // Boxed unlike the sibling `properties` fields — presumably to keep the
    // mutually-recursive type with `IpConfiguration` finite; confirm against
    // the `IpConfiguration` definition earlier in this file.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Box<Option<PublicIpAddressPropertiesFormat>>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// DNS settings attached to a public IP address.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PublicIpAddressDnsSettings {
    #[serde(rename = "domainNameLabel", default, skip_serializing_if = "Option::is_none")]
    pub domain_name_label: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub fqdn: Option<String>,
    #[serde(rename = "reverseFqdn", default, skip_serializing_if = "Option::is_none")]
    pub reverse_fqdn: Option<String>,
}
/// One page of public IP addresses.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PublicIpAddressListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<PublicIpAddress>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Configuration carried by a [`PublicIpAddress`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PublicIpAddressPropertiesFormat {
    #[serde(rename = "publicIPAllocationMethod", default, skip_serializing_if = "Option::is_none")]
    pub public_ip_allocation_method: Option<public_ip_address_properties_format::PublicIpAllocationMethod>,
    #[serde(rename = "ipConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub ip_configuration: Option<IpConfiguration>,
    #[serde(rename = "dnsSettings", default, skip_serializing_if = "Option::is_none")]
    pub dns_settings: Option<PublicIpAddressDnsSettings>,
    #[serde(rename = "ipAddress", default, skip_serializing_if = "Option::is_none")]
    pub ip_address: Option<String>,
    #[serde(rename = "idleTimeoutInMinutes", default, skip_serializing_if = "Option::is_none")]
    pub idle_timeout_in_minutes: Option<i32>,
    #[serde(rename = "resourceGuid", default, skip_serializing_if = "Option::is_none")]
    pub resource_guid: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Closed value sets used by [`PublicIpAddressPropertiesFormat`].
pub mod public_ip_address_properties_format {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum PublicIpAllocationMethod {
        Static,
        Dynamic,
    }
}
/// Top-level resource envelope embedded into concrete resources via
/// `#[serde(flatten)]`: id, name, type, location and free-form tags.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}
/// A route inside a route table.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Route {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<RoutePropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// One page of routes.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RouteListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Route>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Settings of a route; `nextHopType` is mandatory on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RoutePropertiesFormat {
    #[serde(rename = "addressPrefix", default, skip_serializing_if = "Option::is_none")]
    pub address_prefix: Option<String>,
    #[serde(rename = "nextHopType")]
    pub next_hop_type: route_properties_format::NextHopType,
    #[serde(rename = "nextHopIpAddress", default, skip_serializing_if = "Option::is_none")]
    pub next_hop_ip_address: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Closed value sets used by [`RoutePropertiesFormat`].
pub mod route_properties_format {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum NextHopType {
        VirtualNetworkGateway,
        VnetLocal,
        Internet,
        VirtualAppliance,
        None,
    }
}
/// A route table resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RouteTable {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<RouteTablePropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// One page of route tables.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RouteTableListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<RouteTable>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Routes in a route table and the subnets it is associated with.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RouteTablePropertiesFormat {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub routes: Vec<Route>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub subnets: Vec<Subnet>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// A security rule inside a network security group.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SecurityRule {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SecurityRulePropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// One page of security rules.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SecurityRuleListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SecurityRule>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Settings of a security rule; protocol, both address prefixes, access and
/// direction are mandatory on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SecurityRulePropertiesFormat {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    pub protocol: security_rule_properties_format::Protocol,
    #[serde(rename = "sourcePortRange", default, skip_serializing_if = "Option::is_none")]
    pub source_port_range: Option<String>,
    #[serde(rename = "destinationPortRange", default, skip_serializing_if = "Option::is_none")]
    pub destination_port_range: Option<String>,
    #[serde(rename = "sourceAddressPrefix")]
    pub source_address_prefix: String,
    #[serde(rename = "destinationAddressPrefix")]
    pub destination_address_prefix: String,
    pub access: security_rule_properties_format::Access,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub priority: Option<i32>,
    pub direction: security_rule_properties_format::Direction,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Closed value sets used by [`SecurityRulePropertiesFormat`].
pub mod security_rule_properties_format {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Protocol {
        Tcp,
        Udp,
        // Serialized as "*" (wildcard protocol); `U2a` is a generated
        // identifier for the non-alphanumeric wire value.
        #[serde(rename = "*")]
        U2a,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Access {
        Allow,
        Deny,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Direction {
        Inbound,
        Outbound,
    }
}
/// Reference-by-id stub embedded into other resources (often via
/// `#[serde(flatten)]`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubResource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
}
/// A subnet inside a virtual network.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Subnet {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SubnetPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// One page of subnets.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubnetListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Subnet>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Configuration carried by a [`Subnet`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubnetPropertiesFormat {
    #[serde(rename = "addressPrefix", default, skip_serializing_if = "Option::is_none")]
    pub address_prefix: Option<String>,
    #[serde(rename = "networkSecurityGroup", default, skip_serializing_if = "Option::is_none")]
    pub network_security_group: Option<NetworkSecurityGroup>,
    #[serde(rename = "routeTable", default, skip_serializing_if = "Option::is_none")]
    pub route_table: Option<RouteTable>,
    #[serde(rename = "ipConfigurations", default, skip_serializing_if = "Vec::is_empty")]
    pub ip_configurations: Vec<IpConfiguration>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// A single quota/usage measurement: current value against a limit.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Usage {
    pub unit: usage::Unit,
    #[serde(rename = "currentValue")]
    pub current_value: i64,
    pub limit: i64,
    pub name: UsageName,
}
/// Closed value sets used by [`Usage`].
pub mod usage {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Unit {
        Count,
    }
}
/// Machine-readable plus localized display name of a usage counter.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UsageName {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
    #[serde(rename = "localizedValue", default, skip_serializing_if = "Option::is_none")]
    pub localized_value: Option<String>,
}
/// One page of usage measurements.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UsagesListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Usage>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// A virtual network resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualNetwork {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<VirtualNetworkPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// A virtual network gateway resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualNetworkGateway {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<VirtualNetworkGatewayPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// A connection between virtual network gateways (or to a local gateway).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualNetworkGatewayConnection {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<VirtualNetworkGatewayConnectionPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// One page of gateway connections.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualNetworkGatewayConnectionListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<VirtualNetworkGatewayConnection>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Configuration and live status of a gateway connection.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualNetworkGatewayConnectionPropertiesFormat {
    #[serde(rename = "authorizationKey", default, skip_serializing_if = "Option::is_none")]
    pub authorization_key: Option<String>,
    #[serde(rename = "virtualNetworkGateway1", default, skip_serializing_if = "Option::is_none")]
    pub virtual_network_gateway1: Option<VirtualNetworkGateway>,
    #[serde(rename = "virtualNetworkGateway2", default, skip_serializing_if = "Option::is_none")]
    pub virtual_network_gateway2: Option<VirtualNetworkGateway>,
    #[serde(rename = "localNetworkGateway2", default, skip_serializing_if = "Option::is_none")]
    pub local_network_gateway2: Option<LocalNetworkGateway>,
    #[serde(rename = "connectionType", default, skip_serializing_if = "Option::is_none")]
    pub connection_type: Option<virtual_network_gateway_connection_properties_format::ConnectionType>,
    #[serde(rename = "routingWeight", default, skip_serializing_if = "Option::is_none")]
    pub routing_weight: Option<i32>,
    #[serde(rename = "sharedKey", default, skip_serializing_if = "Option::is_none")]
    pub shared_key: Option<String>,
    #[serde(rename = "connectionStatus", default, skip_serializing_if = "Option::is_none")]
    pub connection_status: Option<virtual_network_gateway_connection_properties_format::ConnectionStatus>,
    #[serde(rename = "egressBytesTransferred", default, skip_serializing_if = "Option::is_none")]
    pub egress_bytes_transferred: Option<i64>,
    #[serde(rename = "ingressBytesTransferred", default, skip_serializing_if = "Option::is_none")]
    pub ingress_bytes_transferred: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub peer: Option<SubResource>,
    #[serde(rename = "enableBgp", default, skip_serializing_if = "Option::is_none")]
    pub enable_bgp: Option<bool>,
    #[serde(rename = "resourceGuid", default, skip_serializing_if = "Option::is_none")]
    pub resource_guid: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Closed value sets used by [`VirtualNetworkGatewayConnectionPropertiesFormat`].
pub mod virtual_network_gateway_connection_properties_format {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ConnectionType {
        IPsec,
        Vnet2Vnet,
        ExpressRoute,
        #[serde(rename = "VPNClient")]
        VpnClient,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ConnectionStatus {
        Unknown,
        Connecting,
        Connected,
        NotConnected,
    }
}
/// An IP configuration belonging to a virtual network gateway.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualNetworkGatewayIpConfiguration {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<VirtualNetworkGatewayIpConfigurationPropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// Settings of a gateway IP configuration.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualNetworkGatewayIpConfigurationPropertiesFormat {
    #[serde(rename = "privateIPAddress", default, skip_serializing_if = "Option::is_none")]
    pub private_ip_address: Option<String>,
    #[serde(rename = "privateIPAllocationMethod", default, skip_serializing_if = "Option::is_none")]
    pub private_ip_allocation_method: Option<virtual_network_gateway_ip_configuration_properties_format::PrivateIpAllocationMethod>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub subnet: Option<SubResource>,
    #[serde(rename = "publicIPAddress", default, skip_serializing_if = "Option::is_none")]
    pub public_ip_address: Option<SubResource>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Closed value sets used by [`VirtualNetworkGatewayIpConfigurationPropertiesFormat`].
pub mod virtual_network_gateway_ip_configuration_properties_format {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum PrivateIpAllocationMethod {
        Static,
        Dynamic,
    }
}
/// One page of virtual network gateways.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualNetworkGatewayListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<VirtualNetworkGateway>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Configuration carried by a [`VirtualNetworkGateway`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualNetworkGatewayPropertiesFormat {
    #[serde(rename = "ipConfigurations", default, skip_serializing_if = "Vec::is_empty")]
    pub ip_configurations: Vec<VirtualNetworkGatewayIpConfiguration>,
    #[serde(rename = "gatewayType", default, skip_serializing_if = "Option::is_none")]
    pub gateway_type: Option<virtual_network_gateway_properties_format::GatewayType>,
    #[serde(rename = "vpnType", default, skip_serializing_if = "Option::is_none")]
    pub vpn_type: Option<virtual_network_gateway_properties_format::VpnType>,
    #[serde(rename = "enableBgp", default, skip_serializing_if = "Option::is_none")]
    pub enable_bgp: Option<bool>,
    #[serde(rename = "gatewayDefaultSite", default, skip_serializing_if = "Option::is_none")]
    pub gateway_default_site: Option<SubResource>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<VirtualNetworkGatewaySku>,
    #[serde(rename = "vpnClientConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub vpn_client_configuration: Option<VpnClientConfiguration>,
    #[serde(rename = "bgpSettings", default, skip_serializing_if = "Option::is_none")]
    pub bgp_settings: Option<BgpSettings>,
    #[serde(rename = "resourceGuid", default, skip_serializing_if = "Option::is_none")]
    pub resource_guid: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Closed value sets used by [`VirtualNetworkGatewayPropertiesFormat`].
pub mod virtual_network_gateway_properties_format {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum GatewayType {
        Vpn,
        ExpressRoute,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum VpnType {
        PolicyBased,
        RouteBased,
    }
}
/// SKU (name/tier/capacity) of a virtual network gateway.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualNetworkGatewaySku {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<virtual_network_gateway_sku::Name>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tier: Option<virtual_network_gateway_sku::Tier>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub capacity: Option<i32>,
}
/// Closed value sets used by [`VirtualNetworkGatewaySku`].
pub mod virtual_network_gateway_sku {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Name {
        Basic,
        HighPerformance,
        Standard,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Tier {
        Basic,
        HighPerformance,
        Standard,
    }
}
/// One page of virtual networks.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualNetworkListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<VirtualNetwork>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Configuration carried by a [`VirtualNetwork`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualNetworkPropertiesFormat {
    #[serde(rename = "addressSpace", default, skip_serializing_if = "Option::is_none")]
    pub address_space: Option<AddressSpace>,
    #[serde(rename = "dhcpOptions", default, skip_serializing_if = "Option::is_none")]
    pub dhcp_options: Option<DhcpOptions>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub subnets: Vec<Subnet>,
    #[serde(rename = "resourceGuid", default, skip_serializing_if = "Option::is_none")]
    pub resource_guid: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// VPN-client settings attached to a virtual network gateway.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VpnClientConfiguration {
    #[serde(rename = "vpnClientAddressPool", default, skip_serializing_if = "Option::is_none")]
    pub vpn_client_address_pool: Option<AddressSpace>,
    #[serde(rename = "vpnClientRootCertificates", default, skip_serializing_if = "Vec::is_empty")]
    pub vpn_client_root_certificates: Vec<VpnClientRootCertificate>,
    #[serde(rename = "vpnClientRevokedCertificates", default, skip_serializing_if = "Vec::is_empty")]
    pub vpn_client_revoked_certificates: Vec<VpnClientRevokedCertificate>,
}
/// Parameters for requesting a VPN client package (note the PascalCase wire
/// name `ProcessorArchitecture`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VpnClientParameters {
    #[serde(rename = "ProcessorArchitecture", default, skip_serializing_if = "Option::is_none")]
    pub processor_architecture: Option<vpn_client_parameters::ProcessorArchitecture>,
}
/// Closed value sets used by [`VpnClientParameters`].
pub mod vpn_client_parameters {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProcessorArchitecture {
        Amd64,
        X86,
    }
}
/// A revoked VPN client certificate.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VpnClientRevokedCertificate {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<VpnClientRevokedCertificatePropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// Thumbprint identifying a revoked VPN client certificate.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VpnClientRevokedCertificatePropertiesFormat {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub thumbprint: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// A trusted root certificate for VPN clients.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VpnClientRootCertificate {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<VpnClientRootCertificatePropertiesFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// Public certificate data of a VPN client root certificate.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VpnClientRootCertificatePropertiesFormat {
    #[serde(rename = "publicCertData", default, skip_serializing_if = "Option::is_none")]
    pub public_cert_data: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
| 47.403263 | 137 | 0.728499 |
d50c4f4cf9e077e00d36abfe5809e4a4929a2fe3 | 252 | use std::sync::{Mutex};
use chain::Chain;
pub mod tx;
pub mod utxo;
pub mod block;
pub mod func;
pub mod chain;
pub mod utxos;
use once_cell::sync::Lazy;
/// Process-wide blockchain instance, created lazily on first access and
/// guarded by a [`Mutex`] so callers share one mutable `Chain`.
pub static BLOCK_CHAIN: Lazy<Mutex<Chain>> = Lazy::new(|| Mutex::new(Chain::new()));
| 14 | 59 | 0.65873 |
abf5f1c32be371837884565ca8d346f7601923ef | 5,443 | use anyhow::{Context, Error};
use hotg_rune_core::{ElementType as RuneElementType, Shape, TFLITE_MIMETYPE};
use std::{borrow::Cow, cell::Cell, convert::TryInto, ffi::CStr, sync::Mutex};
use hotg_runecoral::{
ElementType, InferenceContext, Tensor, TensorDescriptor, TensorMut,
AccelerationBackend,
};
use crate::Model;
/// Create a new [`Model`] backed by [`hotg_runecoral`].
///
/// Both `inputs` and `outputs` must be provided; each shape is translated into
/// a librunecoral `TensorDescriptor` and cross-checked against the tensor
/// descriptors the loaded TFLite model actually reports.
///
/// # Errors
///
/// Fails when either shape list is missing, a shape can't be converted (see
/// [`descriptor`]), the inference context can't be created from `model_bytes`,
/// or the declared shapes disagree with the model's own input/output shapes.
pub fn new_model(
    model_bytes: &[u8],
    inputs: Option<&[Shape<'_>]>,
    outputs: Option<&[Shape<'_>]>,
) -> Result<Box<dyn Model>, Error> {
    let inputs = inputs.context("The input shapes must be provided")?;
    let outputs = outputs.context("The output shapes must be provided")?;
    // Convert the Rune-level shapes into librunecoral descriptors up front so
    // shape errors surface before we touch the model itself.
    let input_descriptors = inputs
        .iter()
        .map(descriptor)
        .collect::<Result<Vec<_>, Error>>()
        .context("Invalid input")?;
    let output_descriptors = outputs
        .iter()
        .map(descriptor)
        .collect::<Result<Vec<_>, Error>>()
        .context("Invalid output")?;

    // CPU-only inference context (no accelerator backends requested).
    let ctx = InferenceContext::create_context(
        TFLITE_MIMETYPE,
        model_bytes,
        AccelerationBackend::NONE,
    )
    .context("Unable to create the inference context")?;

    // Sanity-check that the Rune's declared tensor shapes match what the
    // model itself exposes, for both inputs and outputs.
    let model_input_descriptors: Vec<_> = ctx.inputs().collect();
    ensure_shapes_equal(&input_descriptors, &model_input_descriptors)?;
    let model_output_descriptors: Vec<_> = ctx.outputs().collect();
    ensure_shapes_equal(&output_descriptors, &model_output_descriptors)?;

    Ok(Box::new(RuneCoralModel {
        ctx: Mutex::new(ctx),
        inputs: inputs.iter().map(|s| s.to_owned()).collect(),
        input_descriptors,
        outputs: outputs.iter().map(|s| s.to_owned()).collect(),
        output_descriptors,
    }))
}
/// Convert a Rune [`Shape`] into an (unnamed) librunecoral [`TensorDescriptor`].
///
/// The descriptor's name is the empty C string; only element type and
/// dimensions carry information. Dimension conversion uses
/// `try_into().unwrap()`, so a dimension that doesn't fit in an `i32` panics
/// rather than returning `Err` — assumed unreachable for realistic shapes.
fn descriptor(s: &Shape) -> Result<TensorDescriptor<'static>, Error> {
    let dimensions: Vec<i32> = s
        .dimensions()
        .iter()
        .copied()
        .map(|d| d.try_into().unwrap())
        .collect();

    Ok(TensorDescriptor {
        name: CStr::from_bytes_with_nul(b"\0").unwrap(),
        element_type: element_type(s.element_type())?,
        shape: Cow::Owned(dimensions),
    })
}

/// A [`Model`] implementation that forwards inference to a librunecoral
/// [`InferenceContext`], keeping both the Rune-level shapes and the matching
/// librunecoral descriptors for inputs and outputs.
struct RuneCoralModel {
    // Mutex because `InferenceContext` is driven mutably from `infer()`.
    ctx: Mutex<InferenceContext>,
    inputs: Vec<Shape<'static>>,
    input_descriptors: Vec<TensorDescriptor<'static>>,
    outputs: Vec<Shape<'static>>,
    output_descriptors: Vec<TensorDescriptor<'static>>,
}
/// Map a Rune element type onto the corresponding librunecoral element type.
///
/// Returns an error for any Rune type librunecoral has no equivalent for.
fn element_type(rune_type: RuneElementType) -> Result<ElementType, Error> {
    let converted = match rune_type {
        RuneElementType::I8 => ElementType::Int8,
        RuneElementType::U8 => ElementType::UInt8,
        RuneElementType::I16 => ElementType::Int16,
        RuneElementType::I32 => ElementType::Int32,
        RuneElementType::I64 => ElementType::Int64,
        RuneElementType::F32 => ElementType::Float32,
        RuneElementType::F64 => ElementType::Float64,
        RuneElementType::String => ElementType::String,
        unsupported => {
            // Everything else has no librunecoral counterpart.
            anyhow::bail!(
                "librunecoral doesn't support {:?} tensors",
                unsupported
            )
        },
    };
    Ok(converted)
}
/// Check that the tensor descriptors declared by the Rune match the ones the
/// model reports, comparing element type and shape pairwise (names are
/// ignored). On mismatch, bail with both lists pretty-printed.
fn ensure_shapes_equal(
    from_rune: &[TensorDescriptor<'_>],
    from_model: &[TensorDescriptor<'_>],
) -> Result<(), Error> {
    if from_rune.len() == from_model.len()
        && from_rune.iter().zip(from_model.iter()).all(|(x, y)| {
            x.element_type == y.element_type && x.shape == y.shape
        })
    {
        return Ok(());
    }

    // Render a descriptor list as "[d1, d2, ...]" for the error message.
    fn pretty_shapes(descriptors: &[TensorDescriptor<'_>]) -> String {
        format!(
            "[{}]",
            descriptors
                .iter()
                .map(|d| format!("{}", d))
                .collect::<Vec<_>>()
                .join(", ")
        )
    }

    anyhow::bail!(
        "The Rune said tensors would be {}, but the model said they would be {}",
        pretty_shapes(from_rune),
        pretty_shapes(from_model),
    );
}
impl super::Model for RuneCoralModel {
    /// Run one inference pass: wrap the caller's `Cell<u8>` buffers as
    /// borrowed librunecoral tensors (inputs read-only, outputs mutable) and
    /// hand them to the inference context.
    ///
    /// The caller must supply one buffer per input/output descriptor, each
    /// exactly the size the descriptor implies — TODO confirm this invariant
    /// is enforced by the (unsafe) trait contract.
    unsafe fn infer(
        &mut self,
        inputs: &[&[Cell<u8>]],
        outputs: &[&[Cell<u8>]],
    ) -> Result<(), Error> {
        let mut ctx = self.ctx.lock().expect("Lock was poisoned");

        let inputs: Vec<Tensor<'_>> = self
            .input_descriptors
            .iter()
            .zip(inputs)
            .map(|(desc, data)| Tensor {
                element_type: desc.element_type,
                shape: Cow::Borrowed(&desc.shape),
                // Safety: `Cell<u8>` has the same layout as `u8`, so viewing
                // the slice as `&[u8]` is a pure reinterpretation of the same
                // bytes for the duration of this borrow.
                buffer: unsafe {
                    std::slice::from_raw_parts(
                        data.as_ptr() as *const u8,
                        data.len(),
                    )
                },
            })
            .collect();
        let mut outputs: Vec<TensorMut<'_>> = self
            .output_descriptors
            .iter()
            .zip(outputs)
            .map(|(desc, data)| TensorMut {
                element_type: desc.element_type,
                shape: Cow::Borrowed(&desc.shape),
                // Safety: same layout argument as above; `Cell` additionally
                // permits writes through a shared reference, which is what
                // makes the `*mut u8` view sound here.
                buffer: unsafe {
                    std::slice::from_raw_parts_mut(
                        data.as_ptr() as *const Cell<u8> as *mut u8,
                        data.len(),
                    )
                },
            })
            .collect();

        ctx.infer(&inputs, &mut outputs)
            .context("Inference failed")?;

        Ok(())
    }

    fn input_shapes(&self) -> &[Shape<'_>] { &self.inputs }

    fn output_shapes(&self) -> &[Shape<'_>] { &self.outputs }
}
| 31.102857 | 85 | 0.548595 |
1e30eef4bb232c8365c0b4c149bdc4676b5a64b9 | 6,247 | use std::prelude::v1::*;
use super::plumbing::*;
use super::*;
use std::cmp;
/// `MinLen` is an iterator that imposes a minimum length on iterator splits.
/// This struct is created by the [`min_len()`] method on [`IndexedParallelIterator`]
///
/// [`min_len()`]: trait.IndexedParallelIterator.html#method.min_len
/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct MinLen<I: IndexedParallelIterator> {
    // Underlying iterator whose splits we constrain.
    base: I,
    // Lower bound on the length of any piece produced by splitting.
    min: usize,
}

impl<I> MinLen<I>
where
    I: IndexedParallelIterator,
{
    /// Creates a new `MinLen` iterator.
    pub(super) fn new(base: I, min: usize) -> Self {
        MinLen { base, min }
    }
}
impl<I> ParallelIterator for MinLen<I>
where
    I: IndexedParallelIterator,
{
    type Item = I::Item;

    // Indexed adaptors route the unindexed entry point through `bridge`,
    // which pairs the producer with the consumer.
    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    // Length is always known for an indexed iterator.
    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}

impl<I> IndexedParallelIterator for MinLen<I>
where
    I: IndexedParallelIterator,
{
    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
        bridge(self, consumer)
    }

    // Imposing a minimum split length doesn't change the overall length.
    fn len(&self) -> usize {
        self.base.len()
    }

    fn with_producer<CB>(self, callback: CB) -> CB::Output
    where
        CB: ProducerCallback<Self::Item>,
    {
        // Ask the base iterator for its producer, then wrap that producer in
        // `MinLenProducer` before forwarding it to the caller's callback. The
        // local `Callback` struct carries `min` across the callback boundary.
        return self.base.with_producer(Callback {
            callback,
            min: self.min,
        });

        struct Callback<CB> {
            callback: CB,
            min: usize,
        }

        impl<T, CB> ProducerCallback<T> for Callback<CB>
        where
            CB: ProducerCallback<T>,
        {
            type Output = CB::Output;
            fn callback<P>(self, base: P) -> CB::Output
            where
                P: Producer<Item = T>,
            {
                let producer = MinLenProducer {
                    base,
                    min: self.min,
                };
                self.callback.callback(producer)
            }
        }
    }
}
/// ////////////////////////////////////////////////////////////////////////
/// `MinLenProducer` implementation

/// Producer wrapper that raises the reported `min_len`, discouraging the
/// splitter from creating pieces smaller than `min`.
struct MinLenProducer<P> {
    base: P,
    min: usize,
}

impl<P> Producer for MinLenProducer<P>
where
    P: Producer,
{
    type Item = P::Item;
    type IntoIter = P::IntoIter;

    fn into_iter(self) -> Self::IntoIter {
        self.base.into_iter()
    }

    // The effective minimum is the larger of ours and the base's own.
    fn min_len(&self) -> usize {
        cmp::max(self.min, self.base.min_len())
    }

    fn max_len(&self) -> usize {
        self.base.max_len()
    }

    // Splitting delegates to the base; both halves keep the same `min`.
    fn split_at(self, index: usize) -> (Self, Self) {
        let (left, right) = self.base.split_at(index);
        (
            MinLenProducer {
                base: left,
                min: self.min,
            },
            MinLenProducer {
                base: right,
                min: self.min,
            },
        )
    }

    fn fold_with<F>(self, folder: F) -> F
    where
        F: Folder<Self::Item>,
    {
        self.base.fold_with(folder)
    }
}
/// `MaxLen` is an iterator that imposes a maximum length on iterator splits.
/// This struct is created by the [`max_len()`] method on [`IndexedParallelIterator`]
///
/// [`max_len()`]: trait.IndexedParallelIterator.html#method.max_len
/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct MaxLen<I: IndexedParallelIterator> {
    // Underlying iterator whose splits we constrain.
    base: I,
    // Upper bound on the length of any piece produced by splitting.
    max: usize,
}

impl<I> MaxLen<I>
where
    I: IndexedParallelIterator,
{
    /// Creates a new `MaxLen` iterator.
    pub(super) fn new(base: I, max: usize) -> Self {
        MaxLen { base, max }
    }
}
impl<I> ParallelIterator for MaxLen<I>
where
    I: IndexedParallelIterator,
{
    type Item = I::Item;

    // Mirror of the `MinLen` impl: route through `bridge`.
    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}

impl<I> IndexedParallelIterator for MaxLen<I>
where
    I: IndexedParallelIterator,
{
    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
        bridge(self, consumer)
    }

    // Imposing a maximum split length doesn't change the overall length.
    fn len(&self) -> usize {
        self.base.len()
    }

    fn with_producer<CB>(self, callback: CB) -> CB::Output
    where
        CB: ProducerCallback<Self::Item>,
    {
        // Wrap the base producer in `MaxLenProducer`, threading `max` through
        // a local callback struct (same pattern as `MinLen::with_producer`).
        return self.base.with_producer(Callback {
            callback,
            max: self.max,
        });

        struct Callback<CB> {
            callback: CB,
            max: usize,
        }

        impl<T, CB> ProducerCallback<T> for Callback<CB>
        where
            CB: ProducerCallback<T>,
        {
            type Output = CB::Output;
            fn callback<P>(self, base: P) -> CB::Output
            where
                P: Producer<Item = T>,
            {
                let producer = MaxLenProducer {
                    base,
                    max: self.max,
                };
                self.callback.callback(producer)
            }
        }
    }
}
/// ////////////////////////////////////////////////////////////////////////
/// `MaxLenProducer` implementation

/// Producer wrapper that lowers the reported `max_len`, encouraging the
/// splitter to keep pieces no larger than `max`.
struct MaxLenProducer<P> {
    base: P,
    max: usize,
}

impl<P> Producer for MaxLenProducer<P>
where
    P: Producer,
{
    type Item = P::Item;
    type IntoIter = P::IntoIter;

    fn into_iter(self) -> Self::IntoIter {
        self.base.into_iter()
    }

    fn min_len(&self) -> usize {
        self.base.min_len()
    }

    // The effective maximum is the smaller of ours and the base's own.
    fn max_len(&self) -> usize {
        cmp::min(self.max, self.base.max_len())
    }

    // Splitting delegates to the base; both halves keep the same `max`.
    fn split_at(self, index: usize) -> (Self, Self) {
        let (left, right) = self.base.split_at(index);
        (
            MaxLenProducer {
                base: left,
                max: self.max,
            },
            MaxLenProducer {
                base: right,
                max: self.max,
            },
        )
    }

    fn fold_with<F>(self, folder: F) -> F
    where
        F: Folder<Self::Item>,
    {
        self.base.fold_with(folder)
    }
}
| 22.882784 | 85 | 0.526973 |
8f04085491fa276fc1194c99c437c9fdd9a1bccf | 26,438 | use std::collections::hash_map::{Entry, HashMap};
use std::collections::{BTreeSet, HashSet};
use std::fs;
use std::path::{Path, PathBuf};
use std::str;
use std::sync::{Arc, Mutex};
use crate::core::PackageId;
use crate::util::errors::{CargoResult, CargoResultExt};
use crate::util::machine_message;
use crate::util::Cfg;
use crate::util::{self, internal, paths, profile};
use super::job::{Freshness, Job, Work};
use super::{fingerprint, Context, Kind, TargetConfig, Unit};
/// Contains the parsed output of a custom build script.
#[derive(Clone, Debug, Hash)]
pub struct BuildOutput {
    /// Paths to pass to rustc with the `-L` flag.
    pub library_paths: Vec<PathBuf>,
    /// Names and link kinds of libraries, suitable for the `-l` flag.
    pub library_links: Vec<String>,
    /// Linker arguments suitable to be passed to `-C link-arg=<args>`
    pub linker_args: Vec<String>,
    /// Various `--cfg` flags to pass to the compiler.
    pub cfgs: Vec<String>,
    /// Additional environment variables to run the compiler with.
    pub env: Vec<(String, String)>,
    /// Metadata to pass to the immediate dependencies.
    pub metadata: Vec<(String, String)>,
    /// Paths to trigger a rerun of this build script.
    /// May be absolute or relative paths (relative to package root).
    pub rerun_if_changed: Vec<PathBuf>,
    /// Environment variables which, when changed, will cause a rebuild.
    pub rerun_if_env_changed: Vec<String>,
    /// Warnings generated by this build.
    pub warnings: Vec<String>,
}

/// Map of packages to build info.
pub type BuildMap = HashMap<(PackageId, Kind), BuildOutput>;

/// Build info and overrides.
pub struct BuildState {
    /// Parsed build-script outputs keyed by (package, compile kind),
    /// populated as scripts run; behind a `Mutex` because jobs run in parallel.
    pub outputs: Mutex<BuildMap>,
    /// Pre-configured outputs (from `[target.*.<links>]` config) keyed by the
    /// `links` name, which replace actually running the script.
    overrides: HashMap<(String, Kind), BuildOutput>,
}

/// Per-unit accumulation of which build-script outputs affect a compilation.
#[derive(Default)]
pub struct BuildScripts {
    // Cargo will use this `to_link` vector to add `-L` flags to compiles as we
    // propagate them upwards towards the final build. Note, however, that we
    // need to preserve the ordering of `to_link` to be topologically sorted.
    // This will ensure that build scripts which print their paths properly will
    // correctly pick up the files they generated (if there are duplicates
    // elsewhere).
    //
    // To preserve this ordering, the (id, kind) is stored in two places, once
    // in the `Vec` and once in `seen_to_link` for a fast lookup. We maintain
    // this as we're building interactively below to ensure that the memory
    // usage here doesn't blow up too much.
    //
    // For more information, see #2354.
    pub to_link: Vec<(PackageId, Kind)>,
    // Fast membership check mirroring `to_link` (see comment above).
    seen_to_link: HashSet<(PackageId, Kind)>,
    // Host-side (plugin/proc-macro) packages whose script outputs are needed.
    pub plugins: BTreeSet<PackageId>,
}

/// The rerun-tracking inputs of a build script from a previous execution.
#[derive(Debug)]
pub struct BuildDeps {
    /// Path to the recorded `output` file of the script's last run.
    pub build_script_output: PathBuf,
    /// Paths from `cargo:rerun-if-changed` directives.
    pub rerun_if_changed: Vec<PathBuf>,
    /// Variable names from `cargo:rerun-if-env-changed` directives.
    pub rerun_if_env_changed: Vec<String>,
}
/// Prepares a `Work` that executes the target as a custom build script.
///
/// The `req` given is the requirement which this run of the build script will
/// prepare work for. If the requirement is specified as both the target and the
/// host platforms it is assumed that the two are equal and the build script is
/// only run once (not twice).
pub fn prepare<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<Job> {
    let _p = profile::start(format!(
        "build script prepare: {}/{}",
        unit.pkg,
        unit.target.name()
    ));

    let key = (unit.pkg.package_id(), unit.kind);

    // An overridden script is never executed; only its fingerprint is tracked.
    if cx.build_script_overridden.contains(&key) {
        fingerprint::prepare_target(cx, unit, false)
    } else {
        build_work(cx, unit)
    }
}

/// Emit a machine-readable (JSON) "build-script-executed" message describing
/// the parsed output for `package_id`.
fn emit_build_output(output: &BuildOutput, package_id: PackageId) {
    let library_paths = output
        .library_paths
        .iter()
        .map(|l| l.display().to_string())
        .collect::<Vec<_>>();

    machine_message::emit(&machine_message::BuildScript {
        package_id,
        linked_libs: &output.library_links,
        linked_paths: &library_paths,
        cfgs: &output.cfgs,
        env: &output.env,
    });
}
/// Builds the `Job` that actually runs a package's build script: sets up the
/// script's environment (`OUT_DIR`, `TARGET`, `CARGO_FEATURE_*`, `CARGO_CFG_*`,
/// `DEP_<links>_*`, ...), then produces both a "dirty" closure (execute the
/// script and parse/record its output) and a "fresh" closure (reuse the
/// previously recorded output), choosing between them via the fingerprint.
fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<Job> {
    assert!(unit.mode.is_run_custom_build());
    let bcx = &cx.bcx;
    let dependencies = cx.dep_targets(unit);
    // The unit that *compiled* the build script (as opposed to this unit,
    // which *runs* it).
    let build_script_unit = dependencies
        .iter()
        .find(|d| !d.mode.is_run_custom_build() && d.target.is_custom_build())
        .expect("running a script not depending on an actual script");
    let script_dir = cx.files().build_script_dir(build_script_unit);
    let script_out_dir = cx.files().build_script_out_dir(unit);
    let script_run_dir = cx.files().build_script_run_dir(unit);
    let build_plan = bcx.build_config.build_plan;
    let invocation_name = unit.buildkey();

    if let Some(deps) = unit.pkg.manifest().metabuild() {
        prepare_metabuild(cx, build_script_unit, deps)?;
    }

    // Building the command to execute
    let to_exec = script_dir.join(unit.target.name());

    // Start preparing the process to execute, starting out with some
    // environment variables. Note that the profile-related environment
    // variables are not set with this the build script's profile but rather the
    // package's library profile.
    // NOTE: if you add any profile flags, be sure to update
    // `Profiles::get_profile_run_custom_build` so that those flags get
    // carried over.
    let to_exec = to_exec.into_os_string();
    let mut cmd = cx.compilation.host_process(to_exec, unit.pkg)?;
    let debug = unit.profile.debuginfo.unwrap_or(0) != 0;
    cmd.env("OUT_DIR", &script_out_dir)
        .env("CARGO_MANIFEST_DIR", unit.pkg.root())
        .env("NUM_JOBS", &bcx.jobs().to_string())
        .env(
            "TARGET",
            &match unit.kind {
                Kind::Host => bcx.host_triple(),
                Kind::Target => bcx.target_triple(),
            },
        )
        .env("DEBUG", debug.to_string())
        .env("OPT_LEVEL", &unit.profile.opt_level.to_string())
        .env(
            "PROFILE",
            if bcx.build_config.release {
                "release"
            } else {
                "debug"
            },
        )
        .env("HOST", &bcx.host_triple())
        .env("RUSTC", &bcx.rustc.path)
        .env("RUSTDOC", &*bcx.config.rustdoc()?)
        .inherit_jobserver(&cx.jobserver);

    if let Some(ref linker) = bcx.target_config.linker {
        cmd.env("RUSTC_LINKER", linker);
    }

    if let Some(links) = unit.pkg.manifest().links() {
        cmd.env("CARGO_MANIFEST_LINKS", links);
    }

    // Be sure to pass along all enabled features for this package, this is the
    // last piece of statically known information that we have.
    for feat in bcx.resolve.features(unit.pkg.package_id()).iter() {
        cmd.env(&format!("CARGO_FEATURE_{}", super::envify(feat)), "1");
    }

    // Collapse the target's `cfg` values into name -> optional-values pairs so
    // each becomes a single `CARGO_CFG_<NAME>` variable below.
    let mut cfg_map = HashMap::new();
    for cfg in bcx.cfg(unit.kind) {
        match *cfg {
            Cfg::Name(ref n) => {
                cfg_map.insert(n.clone(), None);
            }
            Cfg::KeyPair(ref k, ref v) => {
                if let Some(ref mut values) =
                    *cfg_map.entry(k.clone()).or_insert_with(|| Some(Vec::new()))
                {
                    values.push(v.clone())
                }
            }
        }
    }
    for (k, v) in cfg_map {
        let k = format!("CARGO_CFG_{}", super::envify(&k));
        match v {
            Some(list) => {
                cmd.env(&k, list.join(","));
            }
            None => {
                cmd.env(&k, "");
            }
        }
    }

    // Gather the set of native dependencies that this package has along with
    // some other variables to close over.
    //
    // This information will be used at build-time later on to figure out which
    // sorts of variables need to be discovered at that time.
    let lib_deps = {
        dependencies
            .iter()
            .filter_map(|unit| {
                if unit.mode.is_run_custom_build() {
                    Some((
                        unit.pkg.manifest().links().unwrap().to_string(),
                        unit.pkg.package_id(),
                    ))
                } else {
                    None
                }
            })
            .collect::<Vec<_>>()
    };
    let pkg_name = unit.pkg.to_string();
    let build_state = Arc::clone(&cx.build_state);
    let id = unit.pkg.package_id();
    let output_file = script_run_dir.join("output");
    let err_file = script_run_dir.join("stderr");
    let root_output_file = script_run_dir.join("root-output");
    let host_target_root = cx.files().host_root().to_path_buf();
    let all = (
        id,
        pkg_name.clone(),
        Arc::clone(&build_state),
        output_file.clone(),
        script_out_dir.clone(),
    );
    let build_scripts = super::load_build_deps(cx, unit);
    let kind = unit.kind;
    let json_messages = bcx.build_config.json_messages();
    let extra_verbose = bcx.config.extra_verbose();
    let (prev_output, prev_script_out_dir) = prev_build_output(cx, unit);

    fs::create_dir_all(&script_dir)?;
    fs::create_dir_all(&script_out_dir)?;

    // Prepare the unit of "dirty work" which will actually run the custom build
    // command.
    //
    // Note that this has to do some extra work just before running the command
    // to determine extra environment variables and such.
    let dirty = Work::new(move |state| {
        // Make sure that OUT_DIR exists.
        //
        // If we have an old build directory, then just move it into place,
        // otherwise create it!
        if fs::metadata(&script_out_dir).is_err() {
            fs::create_dir(&script_out_dir).chain_err(|| {
                internal(
                    "failed to create script output directory for \
                     build command",
                )
            })?;
        }

        // For all our native lib dependencies, pick up their metadata to pass
        // along to this custom build command. We're also careful to augment our
        // dynamic library search path in case the build script depended on any
        // native dynamic libraries.
        if !build_plan {
            let build_state = build_state.outputs.lock().unwrap();
            for (name, id) in lib_deps {
                let key = (id, kind);
                let state = build_state.get(&key).ok_or_else(|| {
                    internal(format!(
                        "failed to locate build state for env \
                         vars: {}/{:?}",
                        id, kind
                    ))
                })?;
                let data = &state.metadata;
                for &(ref key, ref value) in data.iter() {
                    cmd.env(
                        &format!("DEP_{}_{}", super::envify(&name), super::envify(key)),
                        value,
                    );
                }
            }
            if let Some(build_scripts) = build_scripts {
                super::add_plugin_deps(&mut cmd, &build_state, &build_scripts, &host_target_root)?;
            }
        }

        // And now finally, run the build command itself!
        if build_plan {
            state.build_plan(invocation_name, cmd.clone(), Arc::new(Vec::new()));
        } else {
            state.running(&cmd);
            let timestamp = paths::set_invocation_time(&script_run_dir)?;
            let output = if extra_verbose {
                let prefix = format!("[{} {}] ", id.name(), id.version());
                state.capture_output(&cmd, Some(prefix), true)
            } else {
                cmd.exec_with_output()
            };
            let output = output.map_err(|e| {
                failure::format_err!(
                    "failed to run custom build command for `{}`\n{}",
                    pkg_name,
                    e
                )
            })?;

            // After the build command has finished running, we need to be sure to
            // remember all of its output so we can later discover precisely what it
            // was, even if we don't run the build command again (due to freshness).
            //
            // This is also the location where we provide feedback into the build
            // state informing what variables were discovered via our script as
            // well.
            paths::write(&output_file, &output.stdout)?;
            filetime::set_file_times(output_file, timestamp, timestamp)?;
            paths::write(&err_file, &output.stderr)?;
            paths::write(&root_output_file, util::path2bytes(&script_out_dir)?)?;
            let parsed_output =
                BuildOutput::parse(&output.stdout, &pkg_name, &script_out_dir, &script_out_dir)?;

            if json_messages {
                emit_build_output(&parsed_output, id);
            }
            build_state.insert(id, kind, parsed_output);
        }
        Ok(())
    });

    // Now that we've prepared our work-to-do, we need to prepare the fresh work
    // itself to run when we actually end up just discarding what we calculated
    // above.
    let fresh = Work::new(move |_tx| {
        let (id, pkg_name, build_state, output_file, script_out_dir) = all;
        let output = match prev_output {
            Some(output) => output,
            None => BuildOutput::parse_file(
                &output_file,
                &pkg_name,
                &prev_script_out_dir,
                &script_out_dir,
            )?,
        };

        if json_messages {
            emit_build_output(&output, id);
        }

        build_state.insert(id, kind, output);
        Ok(())
    });

    let mut job = if cx.bcx.build_config.build_plan {
        Job::new(Work::noop(), Freshness::Dirty)
    } else {
        fingerprint::prepare_target(cx, unit, false)?
    };
    if job.freshness() == Freshness::Dirty {
        job.before(dirty);
    } else {
        job.before(fresh);
    }
    Ok(job)
}
impl BuildState {
    /// Create a `BuildState` seeded with the configured `links` overrides for
    /// both the host and the target platform.
    pub fn new(host_config: &TargetConfig, target_config: &TargetConfig) -> BuildState {
        let mut overrides = HashMap::new();
        let i1 = host_config.overrides.iter().map(|p| (p, Kind::Host));
        let i2 = target_config.overrides.iter().map(|p| (p, Kind::Target));
        for ((name, output), kind) in i1.chain(i2) {
            overrides.insert((name.clone(), kind), output.clone());
        }
        BuildState {
            outputs: Mutex::new(HashMap::new()),
            overrides,
        }
    }

    /// Record the parsed output of a build script for (package, kind).
    fn insert(&self, id: PackageId, kind: Kind, output: BuildOutput) {
        self.outputs.lock().unwrap().insert((id, kind), output);
    }
}
impl BuildOutput {
    /// Read and parse a previously recorded build-script output file.
    ///
    /// `script_out_dir_when_generated` / `script_out_dir` allow rewriting
    /// paths when the target directory has moved since the output was written.
    pub fn parse_file(
        path: &Path,
        pkg_name: &str,
        script_out_dir_when_generated: &Path,
        script_out_dir: &Path,
    ) -> CargoResult<BuildOutput> {
        let contents = paths::read_bytes(path)?;
        BuildOutput::parse(
            &contents,
            pkg_name,
            script_out_dir_when_generated,
            script_out_dir,
        )
    }

    // Parses the output of a script.
    // The `pkg_name` is used for error messages.
    //
    // Only lines of the form `cargo:key=value` are interpreted; known keys
    // (rustc-flags, rustc-link-lib, rustc-link-search, rustc-cdylib-link-arg,
    // rustc-cfg, rustc-env, warning, rerun-if-changed, rerun-if-env-changed)
    // feed dedicated fields, and everything else becomes `metadata`.
    pub fn parse(
        input: &[u8],
        pkg_name: &str,
        script_out_dir_when_generated: &Path,
        script_out_dir: &Path,
    ) -> CargoResult<BuildOutput> {
        let mut library_paths = Vec::new();
        let mut library_links = Vec::new();
        let mut linker_args = Vec::new();
        let mut cfgs = Vec::new();
        let mut env = Vec::new();
        let mut metadata = Vec::new();
        let mut rerun_if_changed = Vec::new();
        let mut rerun_if_env_changed = Vec::new();
        let mut warnings = Vec::new();
        let whence = format!("build script of `{}`", pkg_name);

        for line in input.split(|b| *b == b'\n') {
            // Non-UTF-8 lines are silently skipped.
            let line = match str::from_utf8(line) {
                Ok(line) => line.trim(),
                Err(..) => continue,
            };
            let mut iter = line.splitn(2, ':');
            if iter.next() != Some("cargo") {
                // skip this line since it doesn't start with "cargo:"
                continue;
            }
            let data = match iter.next() {
                Some(val) => val,
                None => continue,
            };

            // getting the `key=value` part of the line
            let mut iter = data.splitn(2, '=');
            let key = iter.next();
            let value = iter.next();
            let (key, value) = match (key, value) {
                (Some(a), Some(b)) => (a, b.trim_end()),
                // Line started with `cargo:` but didn't match `key=value`.
                _ => failure::bail!("Wrong output in {}: `{}`", whence, line),
            };

            // This will rewrite paths if the target directory has been moved.
            let value = value.replace(
                script_out_dir_when_generated.to_str().unwrap(),
                script_out_dir.to_str().unwrap(),
            );

            match key {
                "rustc-flags" => {
                    let (paths, links) = BuildOutput::parse_rustc_flags(&value, &whence)?;
                    library_links.extend(links.into_iter());
                    library_paths.extend(paths.into_iter());
                }
                "rustc-link-lib" => library_links.push(value.to_string()),
                "rustc-link-search" => library_paths.push(PathBuf::from(value)),
                "rustc-cdylib-link-arg" => linker_args.push(value.to_string()),
                "rustc-cfg" => cfgs.push(value.to_string()),
                "rustc-env" => env.push(BuildOutput::parse_rustc_env(&value, &whence)?),
                "warning" => warnings.push(value.to_string()),
                "rerun-if-changed" => rerun_if_changed.push(PathBuf::from(value)),
                "rerun-if-env-changed" => rerun_if_env_changed.push(value.to_string()),
                _ => metadata.push((key.to_string(), value.to_string())),
            }
        }

        Ok(BuildOutput {
            library_paths,
            library_links,
            linker_args,
            cfgs,
            env,
            metadata,
            rerun_if_changed,
            rerun_if_env_changed,
            warnings,
        })
    }

    /// Parse a whitespace-separated `rustc-flags` value, which may only
    /// contain `-l <lib>` and `-L <path>` pairs, into (paths, links).
    pub fn parse_rustc_flags(
        value: &str,
        whence: &str,
    ) -> CargoResult<(Vec<PathBuf>, Vec<String>)> {
        let value = value.trim();
        let mut flags_iter = value
            .split(|c: char| c.is_whitespace())
            .filter(|w| w.chars().any(|c| !c.is_whitespace()));
        let (mut library_paths, mut library_links) = (Vec::new(), Vec::new());

        while let Some(flag) = flags_iter.next() {
            if flag != "-l" && flag != "-L" {
                failure::bail!(
                    "Only `-l` and `-L` flags are allowed in {}: `{}`",
                    whence,
                    value
                )
            }
            // Each flag must be followed by its argument.
            let value = match flags_iter.next() {
                Some(v) => v,
                None => failure::bail!(
                    "Flag in rustc-flags has no value in {}: `{}`",
                    whence,
                    value
                ),
            };
            match flag {
                "-l" => library_links.push(value.to_string()),
                "-L" => library_paths.push(PathBuf::from(value)),

                // was already checked above
                _ => failure::bail!("only -l and -L flags are allowed"),
            };
        }
        Ok((library_paths, library_links))
    }

    /// Parse a `rustc-env` value of the form `NAME=VALUE` into its two parts.
    pub fn parse_rustc_env(value: &str, whence: &str) -> CargoResult<(String, String)> {
        let mut iter = value.splitn(2, '=');
        let name = iter.next();
        let val = iter.next();
        match (name, val) {
            (Some(n), Some(v)) => Ok((n.to_owned(), v.to_owned())),
            _ => failure::bail!("Variable rustc-env has no value in {}: {}", whence, value),
        }
    }
}
/// Generate the synthetic `main.rs` for a metabuild package: a file that
/// imports each metabuild dependency crate and calls its `metabuild()` in a
/// generated `main`, written only if its contents changed.
fn prepare_metabuild<'a, 'cfg>(
    cx: &Context<'a, 'cfg>,
    unit: &Unit<'a>,
    deps: &[String],
) -> CargoResult<()> {
    let mut output = Vec::new();
    let available_deps = cx.dep_targets(unit);
    // Filter out optional dependencies, and look up the actual lib name.
    let meta_deps: Vec<_> = deps
        .iter()
        .filter_map(|name| {
            available_deps
                .iter()
                .find(|u| u.pkg.name().as_str() == name.as_str())
                .map(|dep| dep.target.crate_name())
        })
        .collect();
    for dep in &meta_deps {
        output.push(format!("use {};\n", dep));
    }
    output.push("fn main() {\n".to_string());
    for dep in &meta_deps {
        output.push(format!("    {}::metabuild();\n", dep));
    }
    output.push("}\n".to_string());
    let output = output.join("");
    let path = unit.pkg.manifest().metabuild_path(cx.bcx.ws.target_dir());
    fs::create_dir_all(path.parent().unwrap())?;
    paths::write_if_changed(path, &output)?;
    Ok(())
}

impl BuildDeps {
    /// Capture the rerun-if information from a parsed output (if any) along
    /// with the path of the output file it came from.
    pub fn new(output_file: &Path, output: Option<&BuildOutput>) -> BuildDeps {
        BuildDeps {
            build_script_output: output_file.to_path_buf(),
            rerun_if_changed: output
                .map(|p| &p.rerun_if_changed)
                .cloned()
                .unwrap_or_default(),
            rerun_if_env_changed: output
                .map(|p| &p.rerun_if_env_changed)
                .cloned()
                .unwrap_or_default(),
        }
    }
}
/// Computes the `build_scripts` map in the `Context` which tracks what build
/// scripts each package depends on.
///
/// The global `build_scripts` map lists for all (package, kind) tuples what set
/// of packages' build script outputs must be considered. For example this lists
/// all dependencies' `-L` flags which need to be propagated transitively.
///
/// The given set of targets to this function is the initial set of
/// targets/profiles which are being built.
pub fn build_map<'b, 'cfg>(cx: &mut Context<'b, 'cfg>, units: &[Unit<'b>]) -> CargoResult<()> {
    let mut ret = HashMap::new();
    for unit in units {
        build(&mut ret, cx, unit)?;
    }
    cx.build_scripts
        .extend(ret.into_iter().map(|(k, v)| (k, Arc::new(v))));
    return Ok(());

    // Recursive function to build up the map we're constructing. This function
    // memoizes all of its return values as it goes along.
    fn build<'a, 'b, 'cfg>(
        out: &'a mut HashMap<Unit<'b>, BuildScripts>,
        cx: &mut Context<'b, 'cfg>,
        unit: &Unit<'b>,
    ) -> CargoResult<&'a BuildScripts> {
        // Do a quick pre-flight check to see if we've already calculated the
        // set of dependencies.
        if out.contains_key(unit) {
            return Ok(&out[unit]);
        }

        // If this package's `links` key is overridden in config, record the
        // pre-supplied output and mark the script as overridden.
        let key = unit
            .pkg
            .manifest()
            .links()
            .map(|l| (l.to_string(), unit.kind));
        let build_state = &cx.build_state;
        if let Some(output) = key.and_then(|k| build_state.overrides.get(&k)) {
            let key = (unit.pkg.package_id(), unit.kind);
            cx.build_script_overridden.insert(key);
            build_state
                .outputs
                .lock()
                .unwrap()
                .insert(key, output.clone());
        }

        let mut ret = BuildScripts::default();

        if !unit.target.is_custom_build() && unit.pkg.has_custom_build() {
            add_to_link(&mut ret, unit.pkg.package_id(), unit.kind);
        }

        if unit.mode.is_run_custom_build() {
            parse_previous_explicit_deps(cx, unit)?;
        }

        // We want to invoke the compiler deterministically to be cache-friendly
        // to rustc invocation caching schemes, so be sure to generate the same
        // set of build script dependency orderings via sorting the targets that
        // come out of the `Context`.
        let mut targets = cx.dep_targets(unit);
        targets.sort_by_key(|u| u.pkg.package_id());

        for unit in targets.iter() {
            let dep_scripts = build(out, cx, unit)?;

            if unit.target.for_host() {
                ret.plugins
                    .extend(dep_scripts.to_link.iter().map(|p| &p.0).cloned());
            } else if unit.target.linkable() {
                for &(pkg, kind) in dep_scripts.to_link.iter() {
                    add_to_link(&mut ret, pkg, kind);
                }
            }
        }

        match out.entry(*unit) {
            Entry::Vacant(entry) => Ok(entry.insert(ret)),
            Entry::Occupied(_) => panic!("cyclic dependencies in `build_map`"),
        }
    }

    // When adding an entry to 'to_link' we only actually push it on if the
    // script hasn't seen it yet (e.g., we don't push on duplicates).
    fn add_to_link(scripts: &mut BuildScripts, pkg: PackageId, kind: Kind) {
        if scripts.seen_to_link.insert((pkg, kind)) {
            scripts.to_link.push((pkg, kind));
        }
    }

    // Record the rerun-if inputs parsed from the script's previous run so the
    // fingerprinting machinery can consult them.
    fn parse_previous_explicit_deps<'a, 'cfg>(
        cx: &mut Context<'a, 'cfg>,
        unit: &Unit<'a>,
    ) -> CargoResult<()> {
        let script_run_dir = cx.files().build_script_run_dir(unit);
        let output_file = script_run_dir.join("output");
        let (prev_output, _) = prev_build_output(cx, unit);
        let deps = BuildDeps::new(&output_file, prev_output.as_ref());
        cx.build_explicit_deps.insert(*unit, deps);
        Ok(())
    }
}
/// Returns the previous parsed `BuildOutput`, if any, from a previous
/// execution.
///
/// Also returns the directory containing the output, typically used later in
/// processing.
fn prev_build_output<'a, 'cfg>(
    cx: &mut Context<'a, 'cfg>,
    unit: &Unit<'a>,
) -> (Option<BuildOutput>, PathBuf) {
    let script_out_dir = cx.files().build_script_out_dir(unit);
    let script_run_dir = cx.files().build_script_run_dir(unit);
    let root_output_file = script_run_dir.join("root-output");
    let output_file = script_run_dir.join("output");

    // `root-output` records the OUT_DIR at generation time; fall back to the
    // current one when it's missing or unreadable.
    let prev_script_out_dir = paths::read_bytes(&root_output_file)
        .and_then(|bytes| util::bytes2path(&bytes))
        .unwrap_or_else(|_| script_out_dir.clone());

    (
        // Parse errors (e.g. no previous run) simply yield `None`.
        BuildOutput::parse_file(
            &output_file,
            &unit.pkg.to_string(),
            &prev_script_out_dir,
            &script_out_dir,
        )
        .ok(),
        prev_script_out_dir,
    )
}
| 36.516575 | 99 | 0.559649 |
e4c715da06c82853aa6d6ca1d218f4dbd236ba9e | 22,984 | #[doc = r" Value read from the register"]
pub struct R {
    // Raw 32-bit snapshot of the register's value.
    bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
    // Raw 32-bit value to be written back to the register.
    bits: u32,
}
impl super::DMACFG {
    #[doc = r" Modifies the contents of the register"]
    // Read-modify-write: reads the current value, lets `f` build the new one.
    #[inline]
    pub fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        let bits = self.register.get();
        let r = R { bits: bits };
        let mut w = W { bits: bits };
        f(&r, &mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Reads the contents of the register"]
    #[inline]
    pub fn read(&self) -> R {
        R {
            bits: self.register.get(),
        }
    }
    #[doc = r" Writes to the register"]
    // Starts from the reset value, not the current contents.
    #[inline]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        let mut w = W::reset_value();
        f(&mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Writes the reset value to the register"]
    #[inline]
    pub fn reset(&self) {
        self.write(|w| w)
    }
}
/// Value of the field
pub struct DPWROFFR {
    bits: bool,
}
impl DPWROFFR {
    /// Value of the field as raw bits
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    /// Returns `true` if the bit is clear (0)
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
    /// Returns `true` if the bit is set (1)
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
}
#[doc = "Possible values of the field `DMAMSK`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DMAMSKR {
#[doc = "FIFO Contents are copied directly to memory without modification. value."]
DIS,
#[doc = "Only the FIFODATA contents are copied to memory on DMA transfers. The SLOTNUM and FIFOCNT contents are cleared to zero. value."]
EN,
}
/// Accessors and conversions for the enumerated `DMAMSK` field value.
impl DMAMSKR {
    /// Value of the field as raw bits
    #[inline]
    pub fn bit(&self) -> bool {
        // `EN` is the only variant encoded as 1.
        *self == DMAMSKR::EN
    }
    /// Returns `true` if the bit is set (1)
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
    /// Returns `true` if the bit is clear (0)
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _from(value: bool) -> DMAMSKR {
        if value {
            DMAMSKR::EN
        } else {
            DMAMSKR::DIS
        }
    }
    /// Checks if the value of the field is `DIS`
    #[inline]
    pub fn is_dis(&self) -> bool {
        *self == DMAMSKR::DIS
    }
    /// Checks if the value of the field is `EN`
    #[inline]
    pub fn is_en(&self) -> bool {
        *self == DMAMSKR::EN
    }
}
#[doc = "Possible values of the field `DMAHONSTAT`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DMAHONSTATR {
#[doc = "ADC conversions will continue regardless of DMA status register value."]
DIS,
#[doc = "ADC conversions will not progress if DMAERR or DMACPL bits in DMA status register are set. value."]
EN,
}
impl DMAHONSTATR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
DMAHONSTATR::DIS => false,
DMAHONSTATR::EN => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> DMAHONSTATR {
match value {
false => DMAHONSTATR::DIS,
true => DMAHONSTATR::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline]
pub fn is_dis(&self) -> bool {
*self == DMAHONSTATR::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline]
pub fn is_en(&self) -> bool {
*self == DMAHONSTATR::EN
}
}
#[doc = "Possible values of the field `DMADYNPRI`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DMADYNPRIR {
#[doc = "Disable dynamic priority (use DMAPRI setting only) value."]
DIS,
#[doc = "Enable dynamic priority value."]
EN,
}
impl DMADYNPRIR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
DMADYNPRIR::DIS => false,
DMADYNPRIR::EN => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> DMADYNPRIR {
match value {
false => DMADYNPRIR::DIS,
true => DMADYNPRIR::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline]
pub fn is_dis(&self) -> bool {
*self == DMADYNPRIR::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline]
pub fn is_en(&self) -> bool {
*self == DMADYNPRIR::EN
}
}
#[doc = "Possible values of the field `DMAPRI`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DMAPRIR {
#[doc = "Low Priority (service as best effort) value."]
LOW,
#[doc = "High Priority (service immediately) value."]
HIGH,
}
impl DMAPRIR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
DMAPRIR::LOW => false,
DMAPRIR::HIGH => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> DMAPRIR {
match value {
false => DMAPRIR::LOW,
true => DMAPRIR::HIGH,
}
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline]
pub fn is_low(&self) -> bool {
*self == DMAPRIR::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline]
pub fn is_high(&self) -> bool {
*self == DMAPRIR::HIGH
}
}
#[doc = "Possible values of the field `DMADIR`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DMADIRR {
#[doc = "Peripheral to Memory (SRAM) transaction value."]
P2M,
#[doc = "Memory to Peripheral transaction value."]
M2P,
}
impl DMADIRR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
DMADIRR::P2M => false,
DMADIRR::M2P => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> DMADIRR {
match value {
false => DMADIRR::P2M,
true => DMADIRR::M2P,
}
}
#[doc = "Checks if the value of the field is `P2M`"]
#[inline]
pub fn is_p2m(&self) -> bool {
*self == DMADIRR::P2M
}
#[doc = "Checks if the value of the field is `M2P`"]
#[inline]
pub fn is_m2p(&self) -> bool {
*self == DMADIRR::M2P
}
}
#[doc = "Possible values of the field `DMAEN`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DMAENR {
#[doc = "Disable DMA Function value."]
DIS,
#[doc = "Enable DMA Function value."]
EN,
}
impl DMAENR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
DMAENR::DIS => false,
DMAENR::EN => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> DMAENR {
match value {
false => DMAENR::DIS,
true => DMAENR::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline]
pub fn is_dis(&self) -> bool {
*self == DMAENR::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline]
pub fn is_en(&self) -> bool {
*self == DMAENR::EN
}
}
#[doc = r" Proxy"]
pub struct _DPWROFFW<'a> {
w: &'a mut W,
}
impl<'a> _DPWROFFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 18;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `DMAMSK`"]
pub enum DMAMSKW {
#[doc = "FIFO Contents are copied directly to memory without modification. value."]
DIS,
#[doc = "Only the FIFODATA contents are copied to memory on DMA transfers. The SLOTNUM and FIFOCNT contents are cleared to zero. value."]
EN,
}
impl DMAMSKW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
DMAMSKW::DIS => false,
DMAMSKW::EN => true,
}
}
}
#[doc = r" Proxy"]
pub struct _DMAMSKW<'a> {
w: &'a mut W,
}
impl<'a> _DMAMSKW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: DMAMSKW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "FIFO Contents are copied directly to memory without modification. value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(DMAMSKW::DIS)
}
#[doc = "Only the FIFODATA contents are copied to memory on DMA transfers. The SLOTNUM and FIFOCNT contents are cleared to zero. value."]
#[inline]
pub fn en(self) -> &'a mut W {
self.variant(DMAMSKW::EN)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 17;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `DMAHONSTAT`"]
pub enum DMAHONSTATW {
#[doc = "ADC conversions will continue regardless of DMA status register value."]
DIS,
#[doc = "ADC conversions will not progress if DMAERR or DMACPL bits in DMA status register are set. value."]
EN,
}
impl DMAHONSTATW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
DMAHONSTATW::DIS => false,
DMAHONSTATW::EN => true,
}
}
}
#[doc = r" Proxy"]
pub struct _DMAHONSTATW<'a> {
w: &'a mut W,
}
impl<'a> _DMAHONSTATW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: DMAHONSTATW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "ADC conversions will continue regardless of DMA status register value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(DMAHONSTATW::DIS)
}
#[doc = "ADC conversions will not progress if DMAERR or DMACPL bits in DMA status register are set. value."]
#[inline]
pub fn en(self) -> &'a mut W {
self.variant(DMAHONSTATW::EN)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 16;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `DMADYNPRI`"]
pub enum DMADYNPRIW {
#[doc = "Disable dynamic priority (use DMAPRI setting only) value."]
DIS,
#[doc = "Enable dynamic priority value."]
EN,
}
impl DMADYNPRIW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
DMADYNPRIW::DIS => false,
DMADYNPRIW::EN => true,
}
}
}
#[doc = r" Proxy"]
pub struct _DMADYNPRIW<'a> {
w: &'a mut W,
}
impl<'a> _DMADYNPRIW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: DMADYNPRIW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Disable dynamic priority (use DMAPRI setting only) value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(DMADYNPRIW::DIS)
}
#[doc = "Enable dynamic priority value."]
#[inline]
pub fn en(self) -> &'a mut W {
self.variant(DMADYNPRIW::EN)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 9;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `DMAPRI`"]
pub enum DMAPRIW {
#[doc = "Low Priority (service as best effort) value."]
LOW,
#[doc = "High Priority (service immediately) value."]
HIGH,
}
impl DMAPRIW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
DMAPRIW::LOW => false,
DMAPRIW::HIGH => true,
}
}
}
#[doc = r" Proxy"]
pub struct _DMAPRIW<'a> {
w: &'a mut W,
}
impl<'a> _DMAPRIW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: DMAPRIW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Low Priority (service as best effort) value."]
#[inline]
pub fn low(self) -> &'a mut W {
self.variant(DMAPRIW::LOW)
}
#[doc = "High Priority (service immediately) value."]
#[inline]
pub fn high(self) -> &'a mut W {
self.variant(DMAPRIW::HIGH)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 8;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `DMADIR`"]
pub enum DMADIRW {
#[doc = "Peripheral to Memory (SRAM) transaction value."]
P2M,
#[doc = "Memory to Peripheral transaction value."]
M2P,
}
impl DMADIRW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
DMADIRW::P2M => false,
DMADIRW::M2P => true,
}
}
}
#[doc = r" Proxy"]
pub struct _DMADIRW<'a> {
w: &'a mut W,
}
impl<'a> _DMADIRW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: DMADIRW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Peripheral to Memory (SRAM) transaction value."]
#[inline]
pub fn p2m(self) -> &'a mut W {
self.variant(DMADIRW::P2M)
}
#[doc = "Memory to Peripheral transaction value."]
#[inline]
pub fn m2p(self) -> &'a mut W {
self.variant(DMADIRW::M2P)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 2;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `DMAEN`"]
pub enum DMAENW {
#[doc = "Disable DMA Function value."]
DIS,
#[doc = "Enable DMA Function value."]
EN,
}
impl DMAENW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
DMAENW::DIS => false,
DMAENW::EN => true,
}
}
}
#[doc = r" Proxy"]
pub struct _DMAENW<'a> {
w: &'a mut W,
}
impl<'a> _DMAENW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: DMAENW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Disable DMA Function value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(DMAENW::DIS)
}
#[doc = "Enable DMA Function value."]
#[inline]
pub fn en(self) -> &'a mut W {
self.variant(DMAENW::EN)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bit 18 - Power Off the ADC System upon DMACPL."]
#[inline]
pub fn dpwroff(&self) -> DPWROFFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 18;
((self.bits >> OFFSET) & MASK as u32) != 0
};
DPWROFFR { bits }
}
#[doc = "Bit 17 - Mask the FIFOCNT and SLOTNUM when transferring FIFO contents to memory"]
#[inline]
pub fn dmamsk(&self) -> DMAMSKR {
DMAMSKR::_from({
const MASK: bool = true;
const OFFSET: u8 = 17;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 16 - Halt New ADC conversions until DMA Status DMAERR and DMACPL Cleared."]
#[inline]
pub fn dmahonstat(&self) -> DMAHONSTATR {
DMAHONSTATR::_from({
const MASK: bool = true;
const OFFSET: u8 = 16;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 9 - Enables dynamic priority based on FIFO fullness. When FIFO is full, priority is automatically set to HIGH. Otherwise, DMAPRI is used."]
#[inline]
pub fn dmadynpri(&self) -> DMADYNPRIR {
DMADYNPRIR::_from({
const MASK: bool = true;
const OFFSET: u8 = 9;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 8 - Sets the Priority of the DMA request"]
#[inline]
pub fn dmapri(&self) -> DMAPRIR {
DMAPRIR::_from({
const MASK: bool = true;
const OFFSET: u8 = 8;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 2 - Direction"]
#[inline]
pub fn dmadir(&self) -> DMADIRR {
DMADIRR::_from({
const MASK: bool = true;
const OFFSET: u8 = 2;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 0 - DMA Enable"]
#[inline]
pub fn dmaen(&self) -> DMAENR {
DMAENR::_from({
const MASK: bool = true;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bit 18 - Power Off the ADC System upon DMACPL."]
#[inline]
pub fn dpwroff(&mut self) -> _DPWROFFW {
_DPWROFFW { w: self }
}
#[doc = "Bit 17 - Mask the FIFOCNT and SLOTNUM when transferring FIFO contents to memory"]
#[inline]
pub fn dmamsk(&mut self) -> _DMAMSKW {
_DMAMSKW { w: self }
}
#[doc = "Bit 16 - Halt New ADC conversions until DMA Status DMAERR and DMACPL Cleared."]
#[inline]
pub fn dmahonstat(&mut self) -> _DMAHONSTATW {
_DMAHONSTATW { w: self }
}
#[doc = "Bit 9 - Enables dynamic priority based on FIFO fullness. When FIFO is full, priority is automatically set to HIGH. Otherwise, DMAPRI is used."]
#[inline]
pub fn dmadynpri(&mut self) -> _DMADYNPRIW {
_DMADYNPRIW { w: self }
}
#[doc = "Bit 8 - Sets the Priority of the DMA request"]
#[inline]
pub fn dmapri(&mut self) -> _DMAPRIW {
_DMAPRIW { w: self }
}
#[doc = "Bit 2 - Direction"]
#[inline]
pub fn dmadir(&mut self) -> _DMADIRW {
_DMADIRW { w: self }
}
#[doc = "Bit 0 - DMA Enable"]
#[inline]
pub fn dmaen(&mut self) -> _DMAENW {
_DMAENW { w: self }
}
}
| 27.427208 | 156 | 0.525931 |
eda35a826c872984c569941c7e1c782d8310c376 | 16,177 | use glow_protocol::ve_token::{StakerResponse, StateResponse};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use cosmwasm_std::testing::{MockApi, MockQuerier, MockStorage};
use cosmwasm_std::{
from_binary, from_slice, to_binary, Addr, Binary, BlockInfo, Coin, ContractInfo,
ContractResult, Decimal, Env, MessageInfo, OwnedDeps, Querier, QuerierResult, QueryRequest,
SystemError, SystemResult, Timestamp, Uint128, WasmQuery,
};
use cw20::{BalanceResponse as Cw20BalanceResponse, Cw20QueryMsg};
use terra_cosmwasm::{TaxCapResponse, TaxRateResponse, TerraQuery, TerraQueryWrapper, TerraRoute};
use cosmwasm_bignumber::{Decimal256, Uint256};
use glow_protocol::distributor::GlowEmissionRateResponse;
use moneymarket::market::EpochStateResponse;
use std::collections::HashMap;
use crate::tests::RATE;
use crate::oracle::OracleResponse;
pub const MOCK_CONTRACT_ADDR: &str = "cosmos2contract";
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)]
#[serde(rename_all = "snake_case")]
/// Union of all smart-query messages the mock querier understands; mirrors
/// the query APIs of the external contracts mocked in `handle_query`
/// (Anchor money market, GLOW distributor, cw20 tokens, ve-token staking
/// and the randomness oracle).
pub enum QueryMsg {
    /// Query Epoch State to Anchor money market
    EpochState {
        block_height: Option<u64>,
        distributed_interest: Option<Uint256>,
    },
    /// Query GLOW emission rate to distributor model contract
    GlowEmissionRate {
        current_award: Decimal256,
        target_award: Decimal256,
        current_emission_rate: Decimal256,
    },
    /// cw20-style balance query for `address`
    Balance {
        address: String,
    },
    /// ve-token global state, optionally at a timestamp
    State {
        timestamp: Option<u64>,
    },
    /// ve-token staker info for `address`, optionally at a timestamp
    Staker {
        address: String,
        timestamp: Option<u64>,
    },
    /// Randomness oracle lookup for a round
    GetRandomness {
        round: u64,
    },
}
/// mock_dependencies is a drop-in replacement for cosmwasm_std::testing::mock_dependencies
/// this uses our CustomQuerier.
pub fn mock_dependencies(
    contract_balance: &[Coin],
) -> OwnedDeps<MockStorage, MockApi, WasmMockQuerier> {
    let base = MockQuerier::new(&[(MOCK_CONTRACT_ADDR, contract_balance)]);
    let mut querier = WasmMockQuerier::new(base);
    // Pre-seed the mocked aUST-UST exchange rate used by the tests.
    querier.with_exchange_rate(Decimal256::permille(RATE));
    OwnedDeps {
        storage: MockStorage::default(),
        api: MockApi::default(),
        querier,
    }
}
/// mock_env is a drop-in replacement for cosmwasm_std::testing::mock_env
pub fn mock_env() -> Env {
    // Fixed block height/time and chain id, matching cosmwasm's stock mock_env.
    let block = BlockInfo {
        height: 12_345,
        time: Timestamp::from_seconds(1_595_431_050),
        chain_id: "cosmos-testnet-14002".to_string(),
    };
    let contract = ContractInfo {
        address: Addr::unchecked(MOCK_CONTRACT_ADDR),
    };
    Env { block, contract }
}
/// mock_info is a drop-in replacement for cosmwasm_std::testing::mock_info
pub fn mock_info(sender: &str, funds: &[Coin]) -> MessageInfo {
    let sender = Addr::unchecked(sender);
    let funds = funds.to_vec();
    MessageInfo { sender, funds }
}
/// Test querier layering domain-specific mocks over cosmwasm's `MockQuerier`.
pub struct WasmMockQuerier {
    // Fallback handler for bank and other base queries.
    base: MockQuerier<TerraQueryWrapper>,
    // Per-contract token balances served for Balance/Staker/State queries.
    token_querier: TokenQuerier,
    // Terra treasury tax rate and per-denom caps.
    tax_querier: TaxQuerier,
    // aUST exchange rate returned by EpochState queries.
    exchange_rate_querier: ExchangeRateQuerier,
    emission_rate_querier: EmissionRateQuerier, //TODO: use in tests and replace _ for EmissionRateQuerier
}
#[derive(Clone, Default)]
pub struct TokenQuerier {
// this lets us iterate over all pairs that match the first string
balances: HashMap<String, HashMap<String, Uint128>>,
}
impl TokenQuerier {
    /// Builds a querier from borrowed (contract -> (account, amount)) fixtures.
    pub fn new(balances: &[(&String, &[(&String, &Uint128)])]) -> Self {
        Self {
            balances: balances_to_map(balances),
        }
    }
}
pub(crate) fn balances_to_map(
    balances: &[(&String, &[(&String, &Uint128)])],
) -> HashMap<String, HashMap<String, Uint128>> {
    // Convert the borrowed (contract -> (account -> amount)) fixture slices
    // into owned nested maps; later duplicate keys overwrite earlier ones,
    // exactly as with sequential inserts.
    balances
        .iter()
        .map(|(contract_addr, account_balances)| {
            let inner: HashMap<String, Uint128> = account_balances
                .iter()
                .map(|(addr, amount)| (addr.to_string(), **amount))
                .collect();
            (contract_addr.to_string(), inner)
        })
        .collect()
}
#[derive(Clone, Default)]
pub struct TaxQuerier {
rate: Decimal,
// this lets us iterate over all pairs that match the first string
caps: HashMap<String, Uint128>,
}
impl TaxQuerier {
    /// Builds a tax querier from a rate and borrowed (denom, cap) fixtures.
    pub fn new(rate: Decimal, caps: &[(&String, &Uint128)]) -> Self {
        Self {
            rate,
            caps: caps_to_map(caps),
        }
    }
}
pub(crate) fn caps_to_map(caps: &[(&String, &Uint128)]) -> HashMap<String, Uint128> {
    // Owned copy of the (denom -> tax cap) fixture pairs.
    caps.iter()
        .map(|(denom, cap)| (denom.to_string(), **cap))
        .collect()
}
#[derive(Clone, Default)]
pub struct ExchangeRateQuerier {
exchange_rate: Decimal256,
}
impl ExchangeRateQuerier {
pub fn new(exchange_rate: Decimal256) -> Self {
ExchangeRateQuerier { exchange_rate }
}
}
#[derive(Clone, Default)]
#[allow(dead_code)] // TODO: use this fn in tests
pub struct EmissionRateQuerier {
emission_rate: Decimal256,
}
impl EmissionRateQuerier {
#[allow(dead_code)] // TODO: use this fn in tests
pub fn new(emission_rate: Decimal256) -> Self {
EmissionRateQuerier { emission_rate }
}
}
impl Querier for WasmMockQuerier {
    fn raw_query(&self, bin_request: &[u8]) -> QuerierResult {
        // MockQuerier doesn't support Custom, so we ignore it completely here
        match from_slice::<QueryRequest<TerraQueryWrapper>>(bin_request) {
            Ok(request) => self.handle_query(&request),
            Err(e) => SystemResult::Err(SystemError::InvalidRequest {
                error: format!("Parsing query request: {}", e),
                request: bin_request.into(),
            }),
        }
    }
}
impl WasmMockQuerier {
    /// Routes a parsed query to the matching mock backend.
    ///
    /// - Terra `Custom` queries: only the Treasury route (tax rate / tax cap)
    ///   is mocked; any other route or query panics, which is acceptable in
    ///   test code.
    /// - Wasm `Smart` queries: decoded first as our local `QueryMsg`
    ///   (Anchor epoch state, GLOW emission rate, randomness oracle,
    ///   ve-token staker/state); the remaining variant falls through to a
    ///   plain cw20 balance decode of the same payload.
    /// - Everything else is delegated to the wrapped base `MockQuerier`.
    pub fn handle_query(&self, request: &QueryRequest<TerraQueryWrapper>) -> QuerierResult {
        match &request {
            QueryRequest::Custom(TerraQueryWrapper { route, query_data }) => {
                if route == &TerraRoute::Treasury {
                    match query_data {
                        TerraQuery::TaxRate {} => {
                            let res = TaxRateResponse {
                                rate: self.tax_querier.rate,
                            };
                            SystemResult::Ok(ContractResult::from(to_binary(&res)))
                        }
                        TerraQuery::TaxCap { denom } => {
                            // Unknown denoms fall back to a zero cap.
                            let cap = self
                                .tax_querier
                                .caps
                                .get(denom)
                                .copied()
                                .unwrap_or_default();
                            let res = TaxCapResponse { cap };
                            SystemResult::Ok(ContractResult::from(to_binary(&res)))
                        }
                        _ => panic!("DO NOT ENTER HERE"),
                    }
                } else {
                    panic!("DO NOT ENTER HERE")
                }
            }
            QueryRequest::Wasm(WasmQuery::Smart { contract_addr, msg }) => {
                match from_binary::<QueryMsg>(msg).unwrap() {
                    QueryMsg::EpochState {
                        block_height: _,
                        distributed_interest: _,
                    } => {
                        SystemResult::Ok(ContractResult::from(to_binary(&EpochStateResponse {
                            exchange_rate: self.exchange_rate_querier.exchange_rate, // Current anchor rate,
                            aterra_supply: Uint256::one(),
                        })))
                    }
                    // TODO: revise, currently hard-coded
                    QueryMsg::GlowEmissionRate {
                        current_award: _,
                        target_award: _,
                        current_emission_rate: _,
                    } => SystemResult::Ok(ContractResult::from(to_binary(
                        &GlowEmissionRateResponse {
                            emission_rate: Decimal256::one(),
                        },
                    ))),
                    QueryMsg::GetRandomness { round: _ } => {
                        // Fixed randomness payload, regardless of the requested round.
                        SystemResult::Ok(ContractResult::from(to_binary(&OracleResponse {
                            randomness: Binary::from_base64(
                                "e74c6cfd99371c817e8c3e0099df9074032eec15189c49e5b4740b084ba5ce2b",
                            )
                            .unwrap(),
                            worker: Addr::unchecked(MOCK_CONTRACT_ADDR),
                        })))
                    }
                    QueryMsg::Staker { address, .. } => {
                        // All three staker amounts are reported as the single
                        // tracked token balance for `address`; a missing
                        // address yields a zero cw20 balance response instead.
                        let balances: &HashMap<String, Uint128> =
                            match self.token_querier.balances.get(contract_addr) {
                                Some(balances) => balances,
                                None => {
                                    return SystemResult::Err(SystemError::InvalidRequest {
                                        error: format!(
                                            "No balance info exists for the contract {}",
                                            contract_addr
                                        ),
                                        request: msg.as_slice().into(),
                                    })
                                }
                            };
                        let balance = match balances.get(&address) {
                            Some(v) => *v,
                            None => {
                                return SystemResult::Ok(ContractResult::Ok(
                                    to_binary(&Cw20BalanceResponse {
                                        balance: Uint128::zero(),
                                    })
                                    .unwrap(),
                                ));
                            }
                        };
                        SystemResult::Ok(ContractResult::Ok(
                            to_binary(&StakerResponse {
                                deposited_amount: balance,
                                balance,
                                locked_amount: balance,
                            })
                            .unwrap(),
                        ))
                    }
                    QueryMsg::State { .. } => {
                        let balances: &HashMap<String, Uint128> =
                            match self.token_querier.balances.get(contract_addr) {
                                Some(balances) => balances,
                                None => {
                                    return SystemResult::Err(SystemError::InvalidRequest {
                                        error: format!(
                                            "No balance info exists for the contract {}",
                                            contract_addr
                                        ),
                                        request: msg.as_slice().into(),
                                    })
                                }
                            };
                        // Sum over the entire balance
                        let balance = balances.iter().fold(Uint128::zero(), |sum, x| sum + x.1);
                        SystemResult::Ok(ContractResult::Ok(
                            to_binary(&StateResponse {
                                total_deposited_amount: balance,
                                total_balance: balance,
                                total_locked_amount: balance,
                            })
                            .unwrap(),
                        ))
                    }
                    // Remaining variant (Balance): decode the same payload as
                    // a plain cw20 balance query.
                    _ => match from_binary::<Cw20QueryMsg>(msg).unwrap() {
                        Cw20QueryMsg::Balance { address } => {
                            let balances: &HashMap<String, Uint128> =
                                match self.token_querier.balances.get(contract_addr) {
                                    Some(balances) => balances,
                                    None => {
                                        return SystemResult::Err(SystemError::InvalidRequest {
                                            error: format!(
                                                "No balance info exists for the contract {}",
                                                contract_addr
                                            ),
                                            request: msg.as_slice().into(),
                                        })
                                    }
                                };
                            let balance = match balances.get(&address) {
                                Some(v) => *v,
                                None => {
                                    return SystemResult::Ok(ContractResult::Ok(
                                        to_binary(&Cw20BalanceResponse {
                                            balance: Uint128::zero(),
                                        })
                                        .unwrap(),
                                    ));
                                }
                            };
                            SystemResult::Ok(ContractResult::Ok(
                                to_binary(&Cw20BalanceResponse { balance }).unwrap(),
                            ))
                        }
                        _ => panic!("DO NOT ENTER HERE"),
                    },
                }
            }
            _ => self.base.handle_query(request),
        }
    }
}
impl WasmMockQuerier {
    /// Wraps a base `MockQuerier`; all custom sub-queriers start at their defaults.
    pub fn new(base: MockQuerier<TerraQueryWrapper>) -> Self {
        WasmMockQuerier {
            base,
            token_querier: Default::default(),
            tax_querier: Default::default(),
            exchange_rate_querier: Default::default(),
            emission_rate_querier: Default::default(),
        }
    }
    /// Sets a new native-coin balance for `addr` and returns the old one.
    pub fn update_balance<U: Into<String>>(
        &mut self,
        addr: U,
        balance: Vec<Coin>,
    ) -> Option<Vec<Coin>> {
        self.base.update_balance(addr, balance)
    }
    /// Replaces all tracked token balances with the given fixtures.
    pub fn with_token_balances(&mut self, balances: &[(&String, &[(&String, &Uint128)])]) {
        self.token_querier = TokenQuerier::new(balances);
    }
    /// Adds `diff` to the balance of `token_addr` tracked under contract
    /// `address`, creating missing entries as zero.
    pub fn increment_token_balance(&mut self, address: String, token_addr: String, diff: Uint128) {
        let balance = self
            .token_querier
            .balances
            .entry(address)
            .or_insert_with(HashMap::new)
            .entry(token_addr)
            .or_insert_with(|| Uint128::from(0u128));
        *balance += diff;
    }
    /// Subtracts `diff` from the balance of `token_addr` tracked under
    /// contract `address`, creating missing entries as zero.
    pub fn decrement_token_balance(&mut self, address: String, token_addr: String, diff: Uint128) {
        let balance = self
            .token_querier
            .balances
            .entry(address)
            .or_insert_with(HashMap::new)
            .entry(token_addr)
            .or_insert_with(|| Uint128::from(0u128));
        *balance -= diff;
    }
    /// Configures the Terra treasury tax rate and per-denom caps.
    pub fn with_tax(&mut self, rate: Decimal, caps: &[(&String, &Uint128)]) {
        self.tax_querier = TaxQuerier::new(rate, caps);
    }
    /// Configures the mocked exchange rate served by EpochState queries.
    pub fn with_exchange_rate(&mut self, rate: Decimal256) {
        self.exchange_rate_querier = ExchangeRateQuerier::new(rate);
    }
    /// Configures the mocked GLOW emission rate.
    #[allow(dead_code)] //TODO: Use in tests
    pub fn with_emission_rate(&mut self, rate: Decimal256) {
        self.emission_rate_querier = EmissionRateQuerier::new(rate);
    }
}
| 37.018307 | 108 | 0.492489 |
29d329d46a05a748a664c5c1e4241d0c6753af29 | 731 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: --cfg ndebug
// exec-env:RUST_LOG=conditional-debug-macro-off=4
#![feature(phase)]
#[phase(plugin, link)]
extern crate log;
pub fn main() {
    // only fails if println! evaluates its argument.
    // With logging for this module turned off via RUST_LOG (see exec-env
    // above), the debug! expansion must skip evaluating its argument, so
    // the fail!() inside the block expression is never reached.
    debug!("{}", { if true { fail!() } });
}
| 33.227273 | 69 | 0.705882 |
9182f7ea60184f1be9708dd07b6b903055389cc4 | 387,159 | #![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use crate::models::*;
pub mod operations {
use crate::models::*;
    /// Lists the available Microsoft.Compute REST API operations.
    ///
    /// Sends `GET {base_path}/providers/Microsoft.Compute/operations` with the
    /// configured `api-version`; a 200 response is deserialized into
    /// `ComputeOperationListResult`, any other status is returned as
    /// `list::Error::UnexpectedResponse` carrying the raw body.
    pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<ComputeOperationListResult, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!("{}/providers/Microsoft.Compute/operations", operation_config.base_path(),);
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            // A bearer token is attached only when a credential is configured.
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: ComputeOperationListResult =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(list::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod list {
        use crate::{models, models::*};
        /// Error type for [`list`]; one variant per failure stage of the
        /// request pipeline (URL parsing, auth, build, send, (de)serialization,
        /// unexpected HTTP status).
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub mod availability_sets {
use crate::models::*;
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
availability_set_name: &str,
subscription_id: &str,
) -> std::result::Result<AvailabilitySet, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
availability_set_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: AvailabilitySet =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
availability_set_name: &str,
parameters: &AvailabilitySet,
subscription_id: &str,
) -> std::result::Result<AvailabilitySet, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
availability_set_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: AvailabilitySet = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(create_or_update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod create_or_update {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
availability_set_name: &str,
parameters: &AvailabilitySetUpdate,
subscription_id: &str,
) -> std::result::Result<AvailabilitySet, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
availability_set_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(parameters).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: AvailabilitySet =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod update {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
availability_set_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
availability_set_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
Err(delete::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod delete {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_by_subscription(
operation_config: &crate::OperationConfig,
subscription_id: &str,
expand: Option<&str>,
) -> std::result::Result<AvailabilitySetListResult, list_by_subscription::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/availabilitySets",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(list_by_subscription::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_subscription::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(expand) = expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_subscription::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_subscription::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: AvailabilitySetListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_by_subscription::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_by_subscription {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
subscription_id: &str,
) -> std::result::Result<AvailabilitySetListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: AvailabilitySetListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_available_sizes(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
availability_set_name: &str,
subscription_id: &str,
) -> std::result::Result<VirtualMachineSizeListResult, list_available_sizes::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets/{}/vmSizes",
operation_config.base_path(),
subscription_id,
resource_group_name,
availability_set_name
);
let mut url = url::Url::parse(url_str).map_err(list_available_sizes::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_available_sizes::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_available_sizes::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_available_sizes::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineSizeListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_available_sizes::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_available_sizes::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_available_sizes {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod proximity_placement_groups {
use crate::models::*;
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
proximity_placement_group_name: &str,
subscription_id: &str,
) -> std::result::Result<ProximityPlacementGroup, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/proximityPlacementGroups/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
proximity_placement_group_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProximityPlacementGroup =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
proximity_placement_group_name: &str,
parameters: &ProximityPlacementGroup,
subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/proximityPlacementGroups/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
proximity_placement_group_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProximityPlacementGroup = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: ProximityPlacementGroup = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
Err(create_or_update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod create_or_update {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200(ProximityPlacementGroup),
Created201(ProximityPlacementGroup),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
proximity_placement_group_name: &str,
parameters: &ProximityPlacementGroupUpdate,
subscription_id: &str,
) -> std::result::Result<ProximityPlacementGroup, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/proximityPlacementGroups/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
proximity_placement_group_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(parameters).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProximityPlacementGroup =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod update {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
proximity_placement_group_name: &str,
subscription_id: &str,
) -> std::result::Result<(), delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/proximityPlacementGroups/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
proximity_placement_group_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(()),
status_code => {
let rsp_body = rsp.body();
Err(delete::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod delete {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_by_subscription(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<ProximityPlacementGroupListResult, list_by_subscription::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/proximityPlacementGroups",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(list_by_subscription::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_subscription::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_subscription::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_subscription::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProximityPlacementGroupListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_by_subscription::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_by_subscription {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_by_resource_group(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
subscription_id: &str,
) -> std::result::Result<ProximityPlacementGroupListResult, list_by_resource_group::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/proximityPlacementGroups",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_resource_group::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_by_resource_group::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_resource_group::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProximityPlacementGroupListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_by_resource_group::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_by_resource_group {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod virtual_machine_extension_images {
use crate::models::*;
pub async fn get(
operation_config: &crate::OperationConfig,
location: &str,
publisher_name: &str,
type_: &str,
version: &str,
subscription_id: &str,
) -> std::result::Result<VirtualMachineExtensionImage, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmextension/types/{}/versions/{}",
operation_config.base_path(),
subscription_id,
location,
publisher_name,
type_,
version
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineExtensionImage =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_types(
operation_config: &crate::OperationConfig,
location: &str,
publisher_name: &str,
subscription_id: &str,
) -> std::result::Result<Vec<VirtualMachineExtensionImage>, list_types::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmextension/types",
operation_config.base_path(),
subscription_id,
location,
publisher_name
);
let mut url = url::Url::parse(url_str).map_err(list_types::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_types::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_types::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_types::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Vec<VirtualMachineExtensionImage> =
serde_json::from_slice(rsp_body).map_err(|source| list_types::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_types::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_types {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_versions(
operation_config: &crate::OperationConfig,
location: &str,
publisher_name: &str,
type_: &str,
filter: Option<&str>,
top: Option<i32>,
orderby: Option<&str>,
subscription_id: &str,
) -> std::result::Result<Vec<VirtualMachineExtensionImage>, list_versions::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmextension/types/{}/versions",
operation_config.base_path(),
subscription_id,
location,
publisher_name,
type_
);
let mut url = url::Url::parse(url_str).map_err(list_versions::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_versions::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(top) = top {
url.query_pairs_mut().append_pair("$top", top.to_string().as_str());
}
if let Some(orderby) = orderby {
url.query_pairs_mut().append_pair("$orderby", orderby);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_versions::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_versions::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Vec<VirtualMachineExtensionImage> =
serde_json::from_slice(rsp_body).map_err(|source| list_versions::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_versions::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_versions {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod virtual_machine_extensions {
use crate::models::*;
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_name: &str,
vm_extension_name: &str,
expand: Option<&str>,
subscription_id: &str,
) -> std::result::Result<VirtualMachineExtension, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/extensions/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_name,
vm_extension_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(expand) = expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineExtension =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_name: &str,
vm_extension_name: &str,
extension_parameters: &VirtualMachineExtension,
subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/extensions/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_name,
vm_extension_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(extension_parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineExtension = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineExtension = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
Err(create_or_update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod create_or_update {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200(VirtualMachineExtension),
Created201(VirtualMachineExtension),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_name: &str,
vm_extension_name: &str,
extension_parameters: &VirtualMachineExtensionUpdate,
subscription_id: &str,
) -> std::result::Result<VirtualMachineExtension, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/extensions/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_name,
vm_extension_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(extension_parameters).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineExtension =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod update {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_name: &str,
vm_extension_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/extensions/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_name,
vm_extension_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
Err(delete::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod delete {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_name: &str,
expand: Option<&str>,
subscription_id: &str,
) -> std::result::Result<VirtualMachineExtensionsListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/extensions",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(expand) = expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineExtensionsListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod virtual_machine_images {
use crate::models::*;
pub async fn get(
operation_config: &crate::OperationConfig,
location: &str,
publisher_name: &str,
offer: &str,
skus: &str,
version: &str,
subscription_id: &str,
) -> std::result::Result<VirtualMachineImage, get::Error> {
let http_client = operation_config.http_client();
let url_str = & format ! ("{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmimage/offers/{}/skus/{}/versions/{}" , operation_config . base_path () , subscription_id , location , publisher_name , offer , skus , version) ;
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineImage =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list(
operation_config: &crate::OperationConfig,
location: &str,
publisher_name: &str,
offer: &str,
skus: &str,
expand: Option<&str>,
top: Option<i32>,
orderby: Option<&str>,
subscription_id: &str,
) -> std::result::Result<Vec<VirtualMachineImageResource>, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmimage/offers/{}/skus/{}/versions",
operation_config.base_path(),
subscription_id,
location,
publisher_name,
offer,
skus
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(expand) = expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
if let Some(top) = top {
url.query_pairs_mut().append_pair("$top", top.to_string().as_str());
}
if let Some(orderby) = orderby {
url.query_pairs_mut().append_pair("$orderby", orderby);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Vec<VirtualMachineImageResource> =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_offers(
operation_config: &crate::OperationConfig,
location: &str,
publisher_name: &str,
subscription_id: &str,
) -> std::result::Result<Vec<VirtualMachineImageResource>, list_offers::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmimage/offers",
operation_config.base_path(),
subscription_id,
location,
publisher_name
);
let mut url = url::Url::parse(url_str).map_err(list_offers::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_offers::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_offers::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_offers::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Vec<VirtualMachineImageResource> =
serde_json::from_slice(rsp_body).map_err(|source| list_offers::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_offers::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_offers {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
    /// Lists virtual machine image publishers for an Azure location.
    ///
    /// Sends `GET {base_path}/subscriptions/{subscription_id}/providers/Microsoft.Compute/locations/{location}/publishers`
    /// with the configured `api-version` query parameter. A `200 OK` body is deserialized
    /// into `Vec<VirtualMachineImageResource>`; any other status is surfaced as
    /// `list_publishers::Error::UnexpectedResponse` carrying the raw response body.
    pub async fn list_publishers(
        operation_config: &crate::OperationConfig,
        location: &str,
        subscription_id: &str,
    ) -> std::result::Result<Vec<VirtualMachineImageResource>, list_publishers::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers",
            operation_config.base_path(),
            subscription_id,
            location
        );
        let mut url = url::Url::parse(url_str).map_err(list_publishers::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            // Fetch an OAuth token for the configured resource and attach it as a Bearer header.
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_publishers::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list_publishers::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_publishers::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Vec<VirtualMachineImageResource> = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_publishers::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(list_publishers::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Error type for [`list_publishers`]; one variant per failure stage of the request.
    pub mod list_publishers {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Lists virtual machine image SKUs for a publisher/offer in an Azure location.
    ///
    /// Sends `GET .../locations/{location}/publishers/{publisher_name}/artifacttypes/vmimage/offers/{offer}/skus`.
    /// A `200 OK` body is deserialized into `Vec<VirtualMachineImageResource>`; any other
    /// status becomes `list_skus::Error::UnexpectedResponse` with the raw body.
    pub async fn list_skus(
        operation_config: &crate::OperationConfig,
        location: &str,
        publisher_name: &str,
        offer: &str,
        subscription_id: &str,
    ) -> std::result::Result<Vec<VirtualMachineImageResource>, list_skus::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmimage/offers/{}/skus",
            operation_config.base_path(),
            subscription_id,
            location,
            publisher_name,
            offer
        );
        let mut url = url::Url::parse(url_str).map_err(list_skus::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            // Fetch an OAuth token and attach it as a Bearer header.
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_skus::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list_skus::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_skus::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Vec<VirtualMachineImageResource> =
                    serde_json::from_slice(rsp_body).map_err(|source| list_skus::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(list_skus::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Error type for [`list_skus`]; one variant per failure stage of the request.
    pub mod list_skus {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
/// Operations for the `Microsoft.Compute` per-location usage API.
pub mod usage {
    use crate::models::*;
    /// Retrieves compute usage counters for an Azure location.
    ///
    /// Sends `GET .../providers/Microsoft.Compute/locations/{location}/usages`.
    /// A `200 OK` body is deserialized into `ListUsagesResult`; any other status
    /// becomes `list::Error::UnexpectedResponse` with the raw body.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        location: &str,
        subscription_id: &str,
    ) -> std::result::Result<ListUsagesResult, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/usages",
            operation_config.base_path(),
            subscription_id,
            location
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            // Fetch an OAuth token and attach it as a Bearer header.
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: ListUsagesResult =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(list::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Error type for [`list`]; one variant per failure stage of the request.
    pub mod list {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod virtual_machines {
use crate::models::*;
    /// Lists all virtual machines in a subscription for the given location.
    ///
    /// Sends `GET .../providers/Microsoft.Compute/locations/{location}/virtualMachines`.
    /// A `200 OK` body is deserialized into `VirtualMachineListResult`; any other
    /// status becomes `list_by_location::Error::UnexpectedResponse` with the raw body.
    pub async fn list_by_location(
        operation_config: &crate::OperationConfig,
        location: &str,
        subscription_id: &str,
    ) -> std::result::Result<VirtualMachineListResult, list_by_location::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/virtualMachines",
            operation_config.base_path(),
            subscription_id,
            location
        );
        let mut url = url::Url::parse(url_str).map_err(list_by_location::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            // Fetch an OAuth token and attach it as a Bearer header.
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_by_location::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list_by_location::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_by_location::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualMachineListResult = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_location::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(list_by_location::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Error type for [`list_by_location`]; one variant per failure stage of the request.
    pub mod list_by_location {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Captures a virtual machine image from the given VM.
    ///
    /// Sends `POST .../virtualMachines/{vm_name}/capture` with `parameters` serialized
    /// as the JSON request body. Long-running operation: `200 OK` returns the
    /// `VirtualMachineCaptureResult` payload, while `202 Accepted` indicates the
    /// operation is still in progress; any other status is an `UnexpectedResponse`.
    pub async fn capture(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_name: &str,
        parameters: &VirtualMachineCaptureParameters,
        subscription_id: &str,
    ) -> std::result::Result<capture::Response, capture::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/capture",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_name
        );
        let mut url = url::Url::parse(url_str).map_err(capture::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        if let Some(token_credential) = operation_config.token_credential() {
            // Fetch an OAuth token and attach it as a Bearer header.
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(capture::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = azure_core::to_json(parameters).map_err(capture::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(capture::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(capture::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualMachineCaptureResult =
                    serde_json::from_slice(rsp_body).map_err(|source| capture::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(capture::Response::Ok200(rsp_value))
            }
            http::StatusCode::ACCEPTED => Ok(capture::Response::Accepted202),
            status_code => {
                let rsp_body = rsp.body();
                Err(capture::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for [`capture`].
    pub mod capture {
        use crate::{models, models::*};
        #[derive(Debug)]
        pub enum Response {
            Ok200(VirtualMachineCaptureResult),
            Accepted202,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Retrieves model view (and optionally instance view) of a virtual machine.
    ///
    /// Sends `GET .../virtualMachines/{vm_name}`, appending `$expand` when `expand`
    /// is `Some`. A `200 OK` body is deserialized into `VirtualMachine`; any other
    /// status becomes `get::Error::UnexpectedResponse` with the raw body.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_name: &str,
        expand: Option<&str>,
        subscription_id: &str,
    ) -> std::result::Result<VirtualMachine, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            // Fetch an OAuth token and attach it as a Bearer header.
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        if let Some(expand) = expand {
            url.query_pairs_mut().append_pair("$expand", expand);
        }
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualMachine =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(get::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Error type for [`get`]; one variant per failure stage of the request.
    pub mod get {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Creates or updates a virtual machine.
    ///
    /// Sends `PUT .../virtualMachines/{vm_name}` with `parameters` serialized as the
    /// JSON request body. `200 OK` (updated) and `201 Created` (created) both return
    /// the resulting `VirtualMachine`, distinguished by the `Response` variant; any
    /// other status becomes `create_or_update::Error::UnexpectedResponse`.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_name: &str,
        parameters: &VirtualMachine,
        subscription_id: &str,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_name
        );
        let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            // Fetch an OAuth token and attach it as a Bearer header.
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create_or_update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(create_or_update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualMachine = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualMachine = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(create_or_update::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for [`create_or_update`].
    pub mod create_or_update {
        use crate::{models, models::*};
        #[derive(Debug)]
        pub enum Response {
            Ok200(VirtualMachine),
            Created201(VirtualMachine),
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Updates a virtual machine with a partial resource description.
    ///
    /// Sends `PATCH .../virtualMachines/{vm_name}` with `parameters` serialized as the
    /// JSON request body. `200 OK` and `201 Created` both return the resulting
    /// `VirtualMachine`, distinguished by the `Response` variant; any other status
    /// becomes `update::Error::UnexpectedResponse`.
    pub async fn update(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_name: &str,
        parameters: &VirtualMachineUpdate,
        subscription_id: &str,
    ) -> std::result::Result<update::Response, update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_name
        );
        let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PATCH);
        if let Some(token_credential) = operation_config.token_credential() {
            // Fetch an OAuth token and attach it as a Bearer header.
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = azure_core::to_json(parameters).map_err(update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualMachine =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(update::Response::Ok200(rsp_value))
            }
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualMachine =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(update::Response::Created201(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(update::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for [`update`].
    pub mod update {
        use crate::{models, models::*};
        #[derive(Debug)]
        pub enum Response {
            Ok200(VirtualMachine),
            Created201(VirtualMachine),
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Deletes a virtual machine.
    ///
    /// Sends `DELETE .../virtualMachines/{vm_name}`. Long-running operation:
    /// `200 OK`, `202 Accepted`, and `204 No Content` each map to a dedicated
    /// `Response` variant; any other status becomes `delete::Error::UnexpectedResponse`.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_name
        );
        let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            // Fetch an OAuth token and attach it as a Bearer header.
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(delete::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let rsp_body = rsp.body();
                Err(delete::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for [`delete`].
    pub mod delete {
        use crate::{models, models::*};
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Retrieves runtime status (instance view) of a virtual machine.
    ///
    /// Sends `GET .../virtualMachines/{vm_name}/instanceView`. A `200 OK` body is
    /// deserialized into `VirtualMachineInstanceView`; any other status becomes
    /// `instance_view::Error::UnexpectedResponse` with the raw body.
    pub async fn instance_view(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<VirtualMachineInstanceView, instance_view::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/instanceView",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_name
        );
        let mut url = url::Url::parse(url_str).map_err(instance_view::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            // Fetch an OAuth token and attach it as a Bearer header.
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(instance_view::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(instance_view::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(instance_view::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualMachineInstanceView =
                    serde_json::from_slice(rsp_body).map_err(|source| instance_view::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(instance_view::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Error type for [`instance_view`]; one variant per failure stage of the request.
    pub mod instance_view {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Converts a virtual machine's disks from blob-based to managed disks.
    ///
    /// Sends `POST .../virtualMachines/{vm_name}/convertToManagedDisks` with an empty
    /// body (and an explicit `Content-Length: 0` header). Long-running operation:
    /// `200 OK` and `202 Accepted` each map to a `Response` variant; any other status
    /// becomes `convert_to_managed_disks::Error::UnexpectedResponse`.
    pub async fn convert_to_managed_disks(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<convert_to_managed_disks::Response, convert_to_managed_disks::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/convertToManagedDisks",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_name
        );
        let mut url = url::Url::parse(url_str).map_err(convert_to_managed_disks::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        if let Some(token_credential) = operation_config.token_credential() {
            // Fetch an OAuth token and attach it as a Bearer header.
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(convert_to_managed_disks::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(convert_to_managed_disks::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(convert_to_managed_disks::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(convert_to_managed_disks::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(convert_to_managed_disks::Response::Accepted202),
            status_code => {
                let rsp_body = rsp.body();
                Err(convert_to_managed_disks::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for [`convert_to_managed_disks`].
    pub mod convert_to_managed_disks {
        use crate::{models, models::*};
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Shuts down a virtual machine and releases its compute resources.
    ///
    /// Sends `POST .../virtualMachines/{vm_name}/deallocate` with an empty body (and
    /// an explicit `Content-Length: 0` header). Long-running operation: `200 OK` and
    /// `202 Accepted` each map to a `Response` variant; any other status becomes
    /// `deallocate::Error::UnexpectedResponse`.
    pub async fn deallocate(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<deallocate::Response, deallocate::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/deallocate",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_name
        );
        let mut url = url::Url::parse(url_str).map_err(deallocate::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        if let Some(token_credential) = operation_config.token_credential() {
            // Fetch an OAuth token and attach it as a Bearer header.
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(deallocate::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(deallocate::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(deallocate::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(deallocate::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(deallocate::Response::Accepted202),
            status_code => {
                let rsp_body = rsp.body();
                Err(deallocate::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for [`deallocate`].
    pub mod deallocate {
        use crate::{models, models::*};
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Marks a virtual machine as generalized.
    ///
    /// Sends `POST .../virtualMachines/{vm_name}/generalize` with an empty body (and
    /// an explicit `Content-Length: 0` header). Only `200 OK` is treated as success
    /// (returning `()`); any other status becomes `generalize::Error::UnexpectedResponse`.
    pub async fn generalize(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<(), generalize::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/generalize",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_name
        );
        let mut url = url::Url::parse(url_str).map_err(generalize::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        if let Some(token_credential) = operation_config.token_credential() {
            // Fetch an OAuth token and attach it as a Bearer header.
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(generalize::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(generalize::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(generalize::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(()),
            status_code => {
                let rsp_body = rsp.body();
                Err(generalize::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Error type for [`generalize`]; one variant per failure stage of the request.
    pub mod generalize {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Lists all virtual machines in a resource group.
    ///
    /// Sends `GET .../resourceGroups/{resource_group_name}/providers/Microsoft.Compute/virtualMachines`.
    /// A `200 OK` body is deserialized into `VirtualMachineListResult`; any other
    /// status becomes `list::Error::UnexpectedResponse` with the raw body.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<VirtualMachineListResult, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines",
            operation_config.base_path(),
            subscription_id,
            resource_group_name
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            // Fetch an OAuth token and attach it as a Bearer header.
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualMachineListResult =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(list::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
pub mod list {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_all(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<VirtualMachineListResult, list_all::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/virtualMachines",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(list_all::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_all::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_all::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_all::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineListResult =
serde_json::from_slice(rsp_body).map_err(|source| list_all::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_all::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_all {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_available_sizes(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_name: &str,
subscription_id: &str,
) -> std::result::Result<VirtualMachineSizeListResult, list_available_sizes::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/vmSizes",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_name
);
let mut url = url::Url::parse(url_str).map_err(list_available_sizes::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_available_sizes::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_available_sizes::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_available_sizes::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineSizeListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_available_sizes::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_available_sizes::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_available_sizes {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn power_off(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_name: &str,
subscription_id: &str,
) -> std::result::Result<power_off::Response, power_off::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/powerOff",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_name
);
let mut url = url::Url::parse(url_str).map_err(power_off::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(power_off::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(power_off::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(power_off::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(power_off::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(power_off::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(power_off::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod power_off {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn restart(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_name: &str,
subscription_id: &str,
) -> std::result::Result<restart::Response, restart::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/restart",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_name
);
let mut url = url::Url::parse(url_str).map_err(restart::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(restart::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(restart::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(restart::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(restart::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(restart::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(restart::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod restart {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn start(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_name: &str,
subscription_id: &str,
) -> std::result::Result<start::Response, start::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/start",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_name
);
let mut url = url::Url::parse(url_str).map_err(start::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(start::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(start::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(start::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(start::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(start::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(start::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod start {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn redeploy(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_name: &str,
subscription_id: &str,
) -> std::result::Result<redeploy::Response, redeploy::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/redeploy",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_name
);
let mut url = url::Url::parse(url_str).map_err(redeploy::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(redeploy::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(redeploy::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(redeploy::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(redeploy::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(redeploy::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(redeploy::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod redeploy {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn perform_maintenance(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_name: &str,
subscription_id: &str,
) -> std::result::Result<perform_maintenance::Response, perform_maintenance::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/performMaintenance",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_name
);
let mut url = url::Url::parse(url_str).map_err(perform_maintenance::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(perform_maintenance::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(perform_maintenance::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(perform_maintenance::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(perform_maintenance::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(perform_maintenance::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(perform_maintenance::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod perform_maintenance {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Runs a command on the virtual machine by POSTing `parameters`, serialized
/// as JSON, to `.../virtualMachines/{vm_name}/runCommand`.
///
/// Returns [`run_command::Response::Ok200`] carrying the decoded
/// [`RunCommandResult`] when the service answers synchronously, or
/// [`run_command::Response::Accepted202`] when the operation is accepted for
/// asynchronous processing.
///
/// # Errors
/// Each failing stage (URL parsing, token acquisition, body serialization,
/// request construction, transport, response deserialization, or an
/// unexpected status) maps to its own [`run_command::Error`] variant.
pub async fn run_command(
    operation_config: &crate::OperationConfig,
    resource_group_name: &str,
    vm_name: &str,
    parameters: &RunCommandInput,
    subscription_id: &str,
) -> std::result::Result<run_command::Response, run_command::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/runCommand",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        vm_name
    );
    let mut url = url::Url::parse(url_str).map_err(run_command::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::POST);
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(run_command::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    let req_body = azure_core::to_json(parameters).map_err(run_command::Error::SerializeError)?;
    // Fix: the request carries a JSON body, so declare it. The empty-body
    // POSTs in this file set Content-Length, but this one previously sent
    // its serialized payload with no Content-Type at all.
    req_builder = req_builder.header(http::header::CONTENT_TYPE, "application/json");
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(run_command::Error::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .map_err(run_command::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: RunCommandResult =
                serde_json::from_slice(rsp_body).map_err(|source| run_command::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(run_command::Response::Ok200(rsp_value))
        }
        http::StatusCode::ACCEPTED => Ok(run_command::Response::Accepted202),
        status_code => {
            let rsp_body = rsp.body();
            Err(run_command::Error::UnexpectedResponse {
                status_code,
                body: rsp_body.clone(),
            })
        }
    }
}
/// Namespace holding the response and error types for [`run_command`].
pub mod run_command {
    use crate::{models, models::*};
    /// Successful service outcomes for `run_command`.
    #[derive(Debug)]
    pub enum Response {
        /// 200: the command result is available synchronously.
        Ok200(RunCommandResult),
        /// 202: the operation was accepted for asynchronous processing.
        Accepted202,
    }
    /// One variant per stage of the request pipeline that can fail.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("Unexpected HTTP status code {}", status_code)]
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
}
pub mod virtual_machine_sizes {
use crate::models::*;
pub async fn list(
operation_config: &crate::OperationConfig,
location: &str,
subscription_id: &str,
) -> std::result::Result<VirtualMachineSizeListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/vmSizes",
operation_config.base_path(),
subscription_id,
location
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineSizeListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod images {
use crate::models::*;
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
image_name: &str,
expand: Option<&str>,
subscription_id: &str,
) -> std::result::Result<Image, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/images/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
image_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(expand) = expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Image =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
image_name: &str,
parameters: &Image,
subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/images/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
image_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Image = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: Image = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
Err(create_or_update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod create_or_update {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200(Image),
Created201(Image),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
image_name: &str,
parameters: &ImageUpdate,
subscription_id: &str,
) -> std::result::Result<update::Response, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/images/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
image_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(parameters).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Image =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: Image =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
Err(update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod update {
        use crate::{models, models::*};
        // Success payloads for `update`: the service replies 200 OK or
        // 201 Created, each carrying the resulting Image document.
        #[derive(Debug)]
        pub enum Response {
            Ok200(Image),
            Created201(Image),
        }
        // One variant per failure point in the request pipeline, from URL
        // construction through token acquisition to response deserialization.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
image_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/images/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
image_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
Err(delete::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod delete {
        use crate::{models, models::*};
        // Bodyless success markers: delete may complete synchronously (200),
        // be accepted for asynchronous processing (202), or report the
        // resource already absent (204).
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        // One variant per failure point in the request pipeline.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list_by_resource_group(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
subscription_id: &str,
) -> std::result::Result<ImageListResult, list_by_resource_group::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/images",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_resource_group::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_by_resource_group::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_resource_group::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ImageListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_by_resource_group::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod list_by_resource_group {
        use crate::{models, models::*};
        // One variant per failure point in the request pipeline; this
        // operation has a single success shape, so no Response enum exists.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<ImageListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/images",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ImageListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod list {
        use crate::{models, models::*};
        // One variant per failure point in the request pipeline; this
        // operation has a single success shape, so no Response enum exists.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod virtual_machine_scale_sets {
use crate::models::*;
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
subscription_id: &str,
) -> std::result::Result<VirtualMachineScaleSet, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineScaleSet =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod get {
        use crate::{models, models::*};
        // One variant per failure point in the request pipeline; this
        // operation has a single success shape, so no Response enum exists.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
parameters: &VirtualMachineScaleSet,
subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineScaleSet = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineScaleSet = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
Err(create_or_update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod create_or_update {
        use crate::{models, models::*};
        // Success payloads: 200 OK (updated existing) or 201 Created (new),
        // each carrying the resulting scale set document.
        #[derive(Debug)]
        pub enum Response {
            Ok200(VirtualMachineScaleSet),
            Created201(VirtualMachineScaleSet),
        }
        // One variant per failure point in the request pipeline.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
parameters: &VirtualMachineScaleSetUpdate,
subscription_id: &str,
) -> std::result::Result<VirtualMachineScaleSet, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(parameters).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineScaleSet =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod update {
        use crate::{models, models::*};
        // One variant per failure point in the request pipeline; this
        // operation has a single success shape, so no Response enum exists.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
Err(delete::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod delete {
        use crate::{models, models::*};
        // Bodyless success markers: synchronous completion (200), accepted
        // for asynchronous processing (202), or already absent (204).
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        // One variant per failure point in the request pipeline.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn deallocate(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
vm_instance_i_ds: Option<&VirtualMachineScaleSetVmInstanceIDs>,
subscription_id: &str,
) -> std::result::Result<deallocate::Response, deallocate::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/deallocate",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(deallocate::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(deallocate::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = if let Some(vm_instance_i_ds) = vm_instance_i_ds {
azure_core::to_json(vm_instance_i_ds).map_err(deallocate::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(deallocate::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(deallocate::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(deallocate::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(deallocate::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(deallocate::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod deallocate {
        use crate::{models, models::*};
        // Bodyless success markers: synchronous completion (200) or accepted
        // for asynchronous processing (202).
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
        }
        // One variant per failure point in the request pipeline.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn delete_instances(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
vm_instance_i_ds: &VirtualMachineScaleSetVmInstanceRequiredIDs,
subscription_id: &str,
) -> std::result::Result<delete_instances::Response, delete_instances::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/delete",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(delete_instances::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete_instances::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(vm_instance_i_ds).map_err(delete_instances::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete_instances::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(delete_instances::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete_instances::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(delete_instances::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(delete_instances::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod delete_instances {
        use crate::{models, models::*};
        // Bodyless success markers: synchronous completion (200) or accepted
        // for asynchronous processing (202).
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
        }
        // One variant per failure point in the request pipeline.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn get_instance_view(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
subscription_id: &str,
) -> std::result::Result<VirtualMachineScaleSetInstanceView, get_instance_view::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/instanceView",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(get_instance_view::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_instance_view::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get_instance_view::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_instance_view::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineScaleSetInstanceView = serde_json::from_slice(rsp_body)
.map_err(|source| get_instance_view::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get_instance_view::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod get_instance_view {
        use crate::{models, models::*};
        // One variant per failure point in the request pipeline; this
        // operation has a single success shape, so no Response enum exists.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
subscription_id: &str,
) -> std::result::Result<VirtualMachineScaleSetListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineScaleSetListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod list {
        use crate::{models, models::*};
        // One variant per failure point in the request pipeline; this
        // operation has a single success shape, so no Response enum exists.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list_all(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<VirtualMachineScaleSetListWithLinkResult, list_all::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/virtualMachineScaleSets",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(list_all::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_all::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_all::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_all::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineScaleSetListWithLinkResult =
serde_json::from_slice(rsp_body).map_err(|source| list_all::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_all::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod list_all {
        use crate::{models, models::*};
        // One variant per failure point in the request pipeline; this
        // operation has a single success shape, so no Response enum exists.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Lists the SKUs for the specified virtual machine scale set.
    ///
    /// Sends `GET .../virtualMachineScaleSets/{vm_scale_set_name}/skus` and
    /// deserializes a `200 OK` body into [`VirtualMachineScaleSetListSkusResult`];
    /// any other status code is returned as [`list_skus::Error::UnexpectedResponse`].
    pub async fn list_skus(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_scale_set_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<VirtualMachineScaleSetListSkusResult, list_skus::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/skus",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_scale_set_name
        );
        let mut url = url::Url::parse(url_str).map_err(list_skus::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_skus::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET request: empty body.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list_skus::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_skus::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualMachineScaleSetListSkusResult =
                    serde_json::from_slice(rsp_body).map_err(|source| list_skus::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(list_skus::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Error type for the `list_skus` operation.
    pub mod list_skus {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Gets the OS upgrade history for the specified virtual machine scale set.
    ///
    /// Sends `GET .../virtualMachineScaleSets/{vm_scale_set_name}/osUpgradeHistory`
    /// and deserializes a `200 OK` body into
    /// [`VirtualMachineScaleSetListOsUpgradeHistory`]; any other status code is
    /// returned as [`get_os_upgrade_history::Error::UnexpectedResponse`].
    pub async fn get_os_upgrade_history(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_scale_set_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<VirtualMachineScaleSetListOsUpgradeHistory, get_os_upgrade_history::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/osUpgradeHistory",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_scale_set_name
        );
        let mut url = url::Url::parse(url_str).map_err(get_os_upgrade_history::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get_os_upgrade_history::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET request: empty body.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(get_os_upgrade_history::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(get_os_upgrade_history::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualMachineScaleSetListOsUpgradeHistory = serde_json::from_slice(rsp_body)
                    .map_err(|source| get_os_upgrade_history::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(get_os_upgrade_history::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Error type for the `get_os_upgrade_history` operation.
    pub mod get_os_upgrade_history {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Powers off the virtual machine scale set (or a subset of its instances).
    ///
    /// Sends `POST .../virtualMachineScaleSets/{vm_scale_set_name}/poweroff`.
    /// When `vm_instance_i_ds` is `Some`, the instance-ID list is serialized as
    /// the JSON request body; otherwise an empty body is sent. Maps `200` to
    /// [`power_off::Response::Ok200`] and `202` to
    /// [`power_off::Response::Accepted202`]; any other status is an error.
    pub async fn power_off(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_scale_set_name: &str,
        vm_instance_i_ds: Option<&VirtualMachineScaleSetVmInstanceIDs>,
        subscription_id: &str,
    ) -> std::result::Result<power_off::Response, power_off::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/poweroff",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_scale_set_name
        );
        let mut url = url::Url::parse(url_str).map_err(power_off::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(power_off::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()))
pub async fn restart(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
vm_instance_i_ds: Option<&VirtualMachineScaleSetVmInstanceIDs>,
subscription_id: &str,
) -> std::result::Result<restart::Response, restart::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/restart",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(restart::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(restart::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = if let Some(vm_instance_i_ds) = vm_instance_i_ds {
azure_core::to_json(vm_instance_i_ds).map_err(restart::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(restart::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(restart::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(restart::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(restart::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(restart::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod restart {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn start(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
vm_instance_i_ds: Option<&VirtualMachineScaleSetVmInstanceIDs>,
subscription_id: &str,
) -> std::result::Result<start::Response, start::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/start",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(start::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(start::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = if let Some(vm_instance_i_ds) = vm_instance_i_ds {
azure_core::to_json(vm_instance_i_ds).map_err(start::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(start::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(start::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(start::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(start::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(start::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod start {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn redeploy(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
vm_instance_i_ds: Option<&VirtualMachineScaleSetVmInstanceIDs>,
subscription_id: &str,
) -> std::result::Result<redeploy::Response, redeploy::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/redeploy",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(redeploy::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(redeploy::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = if let Some(vm_instance_i_ds) = vm_instance_i_ds {
azure_core::to_json(vm_instance_i_ds).map_err(redeploy::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(redeploy::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(redeploy::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(redeploy::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(redeploy::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(redeploy::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod redeploy {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn perform_maintenance(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
vm_instance_i_ds: Option<&VirtualMachineScaleSetVmInstanceIDs>,
subscription_id: &str,
) -> std::result::Result<perform_maintenance::Response, perform_maintenance::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/performMaintenance",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(perform_maintenance::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(perform_maintenance::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = if let Some(vm_instance_i_ds) = vm_instance_i_ds {
azure_core::to_json(vm_instance_i_ds).map_err(perform_maintenance::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(perform_maintenance::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(perform_maintenance::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(perform_maintenance::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(perform_maintenance::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(perform_maintenance::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod perform_maintenance {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn update_instances(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
vm_instance_i_ds: &VirtualMachineScaleSetVmInstanceRequiredIDs,
subscription_id: &str,
) -> std::result::Result<update_instances::Response, update_instances::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/manualupgrade",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(update_instances::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update_instances::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(vm_instance_i_ds).map_err(update_instances::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update_instances::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(update_instances::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(update_instances::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(update_instances::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(update_instances::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod update_instances {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn reimage(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
vm_instance_i_ds: Option<&VirtualMachineScaleSetVmInstanceIDs>,
subscription_id: &str,
) -> std::result::Result<reimage::Response, reimage::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/reimage",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(reimage::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(reimage::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = if let Some(vm_instance_i_ds) = vm_instance_i_ds {
azure_core::to_json(vm_instance_i_ds).map_err(reimage::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(reimage::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(reimage::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(reimage::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(reimage::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(reimage::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod reimage {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn reimage_all(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
vm_instance_i_ds: Option<&VirtualMachineScaleSetVmInstanceIDs>,
subscription_id: &str,
) -> std::result::Result<reimage_all::Response, reimage_all::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/reimageall",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(reimage_all::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(reimage_all::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = if let Some(vm_instance_i_ds) = vm_instance_i_ds {
azure_core::to_json(vm_instance_i_ds).map_err(reimage_all::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(reimage_all::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(reimage_all::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(reimage_all::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(reimage_all::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(reimage_all::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod reimage_all {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn force_recovery_service_fabric_platform_update_domain_walk(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
subscription_id: &str,
platform_update_domain: i64,
) -> std::result::Result<RecoveryWalkResponse, force_recovery_service_fabric_platform_update_domain_walk::Error> {
let http_client = operation_config.http_client();
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/forceRecoveryServiceFabricPlatformUpdateDomainWalk" , operation_config . base_path () , subscription_id , resource_group_name , vm_scale_set_name) ;
let mut url = url::Url::parse(url_str).map_err(force_recovery_service_fabric_platform_update_domain_walk::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(force_recovery_service_fabric_platform_update_domain_walk::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
url.query_pairs_mut()
.append_pair("platformUpdateDomain", platform_update_domain.to_string().as_str());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(force_recovery_service_fabric_platform_update_domain_walk::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(force_recovery_service_fabric_platform_update_domain_walk::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RecoveryWalkResponse = serde_json::from_slice(rsp_body).map_err(|source| {
force_recovery_service_fabric_platform_update_domain_walk::Error::DeserializeError(source, rsp_body.clone())
})?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(
force_recovery_service_fabric_platform_update_domain_walk::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
},
)
}
}
}
pub mod force_recovery_service_fabric_platform_update_domain_walk {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
/// Client operations for VM scale set extensions
/// (`.../virtualMachineScaleSets/{name}/extensions` under Microsoft.Compute).
/// Each operation builds the request URL from `OperationConfig`, attaches a
/// bearer token when a credential is configured, and maps non-success HTTP
/// statuses into the operation's own `Error` enum.
pub mod virtual_machine_scale_set_extensions {
    use crate::models::*;
    /// Gets one named extension of a VM scale set via HTTP GET.
    ///
    /// `expand` is forwarded as the `$expand` query parameter when present.
    /// Returns the deserialized extension on 200; any other status becomes
    /// `get::Error::UnexpectedResponse` carrying the raw body.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_scale_set_name: &str,
        vmss_extension_name: &str,
        expand: Option<&str>,
        subscription_id: &str,
    ) -> std::result::Result<VirtualMachineScaleSetExtension, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/extensions/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_scale_set_name,
            vmss_extension_name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Authentication is optional: only add the Authorization header when a
        // token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        if let Some(expand) = expand {
            url.query_pairs_mut().append_pair("$expand", expand);
        }
        // GET carries no payload.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualMachineScaleSetExtension =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(get::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Error type for [`get`].
    pub mod get {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Creates or updates an extension on a VM scale set via HTTP PUT.
    ///
    /// `extension_parameters` is serialized to JSON as the request body.
    /// Distinguishes 200 (updated) from 201 (created) via
    /// [`create_or_update::Response`].
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_scale_set_name: &str,
        vmss_extension_name: &str,
        extension_parameters: &VirtualMachineScaleSetExtension,
        subscription_id: &str,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/extensions/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_scale_set_name,
            vmss_extension_name
        );
        let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create_or_update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // Serialize the extension payload as the PUT body.
        let req_body = azure_core::to_json(extension_parameters).map_err(create_or_update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(create_or_update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualMachineScaleSetExtension = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualMachineScaleSetExtension = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(create_or_update::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for [`create_or_update`].
    pub mod create_or_update {
        use crate::{models, models::*};
        #[derive(Debug)]
        pub enum Response {
            Ok200(VirtualMachineScaleSetExtension),
            Created201(VirtualMachineScaleSetExtension),
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Deletes a named extension of a VM scale set via HTTP DELETE.
    ///
    /// The service may answer 200, 202 (accepted, long-running) or 204
    /// (nothing to delete); each maps to a [`delete::Response`] variant.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_scale_set_name: &str,
        vmss_extension_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/extensions/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_scale_set_name,
            vmss_extension_name
        );
        let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(delete::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version())
;
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let rsp_body = rsp.body();
                Err(delete::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for [`delete`].
    pub mod delete {
        use crate::{models, models::*};
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Lists all extensions of a VM scale set via HTTP GET.
    ///
    /// Returns a single page result; paging (if any) would be driven by the
    /// caller via the returned list result.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_scale_set_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<VirtualMachineScaleSetExtensionListResult, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/extensions",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_scale_set_name
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualMachineScaleSetExtensionListResult =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(list::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Error type for [`list`].
    pub mod list {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
/// Client operations for VM scale set rolling upgrades
/// (`.../virtualMachineScaleSets/{name}/rollingUpgrades/...` and
/// `.../osRollingUpgrade` under Microsoft.Compute).
pub mod virtual_machine_scale_set_rolling_upgrades {
    use crate::models::*;
    /// Cancels the current rolling upgrade of a VM scale set via HTTP POST
    /// to `.../rollingUpgrades/cancel`.
    ///
    /// Maps 200 and 202 to [`cancel::Response`] variants; anything else is
    /// `cancel::Error::UnexpectedResponse`.
    pub async fn cancel(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_scale_set_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<cancel::Response, cancel::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/rollingUpgrades/cancel",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_scale_set_name
        );
        let mut url = url::Url::parse(url_str).map_err(cancel::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        // Authentication is optional: only add the Authorization header when a
        // token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(cancel::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        // POST with an empty body: set an explicit Content-Length of 0.
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(cancel::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(cancel::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(cancel::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(cancel::Response::Accepted202),
            status_code => {
                let rsp_body = rsp.body();
                Err(cancel::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for [`cancel`].
    pub mod cancel {
        use crate::{models, models::*};
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Starts an OS rolling upgrade on a VM scale set via HTTP POST to
    /// `.../osRollingUpgrade`. Maps 200/202 to [`start_os_upgrade::Response`].
    pub async fn start_os_upgrade(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_scale_set_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<start_os_upgrade::Response, start_os_upgrade::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/osRollingUpgrade",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_scale_set_name
        );
        let mut url = url::Url::parse(url_str).map_err(start_os_upgrade::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(start_os_upgrade::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        // POST with an empty body: set an explicit Content-Length of 0.
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str())
;
        let req = req_builder.body(req_body).map_err(start_os_upgrade::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(start_os_upgrade::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(start_os_upgrade::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(start_os_upgrade::Response::Accepted202),
            status_code => {
                let rsp_body = rsp.body();
                Err(start_os_upgrade::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for [`start_os_upgrade`].
    pub mod start_os_upgrade {
        use crate::{models, models::*};
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Gets the status of the latest rolling upgrade of a VM scale set via
    /// HTTP GET to `.../rollingUpgrades/latest`.
    pub async fn get_latest(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_scale_set_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<RollingUpgradeStatusInfo, get_latest::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/rollingUpgrades/latest",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_scale_set_name
        );
        let mut url = url::Url::parse(url_str).map_err(get_latest::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get_latest::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get_latest::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(get_latest::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: RollingUpgradeStatusInfo =
                    serde_json::from_slice(rsp_body).map_err(|source| get_latest::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(get_latest::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Error type for [`get_latest`].
    pub mod get_latest {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod virtual_machine_scale_set_v_ms {
use crate::models::*;
    /// Reimages (upgrades the operating system of) one VM instance in a VM
    /// scale set via HTTP POST to `.../virtualmachines/{instance_id}/reimage`.
    ///
    /// Maps 200/202 to [`reimage::Response`]; other statuses become
    /// `reimage::Error::UnexpectedResponse` carrying the raw body.
    pub async fn reimage(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_scale_set_name: &str,
        instance_id: &str,
        subscription_id: &str,
    ) -> std::result::Result<reimage::Response, reimage::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/reimage",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_scale_set_name,
            instance_id
        );
        let mut url = url::Url::parse(url_str).map_err(reimage::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        // Authentication is optional: only attach a bearer token when a
        // credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(reimage::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        // POST with an empty body: set an explicit Content-Length of 0.
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0)
;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(reimage::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(reimage::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(reimage::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(reimage::Response::Accepted202),
            status_code => {
                let rsp_body = rsp.body();
                Err(reimage::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for [`reimage`].
    pub mod reimage {
        use crate::{models, models::*};
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Reimages all disks of one VM instance in a VM scale set via HTTP POST
    /// to `.../virtualmachines/{instance_id}/reimageall`.
    ///
    /// Maps 200/202 to [`reimage_all::Response`].
    pub async fn reimage_all(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_scale_set_name: &str,
        instance_id: &str,
        subscription_id: &str,
    ) -> std::result::Result<reimage_all::Response, reimage_all::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/reimageall",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_scale_set_name,
            instance_id
        );
        let mut url = url::Url::parse(url_str).map_err(reimage_all::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(reimage_all::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        // POST with an empty body: set an explicit Content-Length of 0.
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(reimage_all::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(reimage_all::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(reimage_all::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(reimage_all::Response::Accepted202),
            status_code => {
                let rsp_body = rsp.body();
                Err(reimage_all::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for [`reimage_all`].
    pub mod reimage_all {
        use crate::{models, models::*};
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Deallocates one VM instance in a VM scale set via HTTP POST to
    /// `.../virtualmachines/{instance_id}/deallocate`.
    ///
    /// Maps 200/202 to [`deallocate::Response`].
    pub async fn deallocate(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_scale_set_name: &str,
        instance_id: &str,
        subscription_id: &str,
    ) -> std::result::Result<deallocate::Response, deallocate::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/deallocate",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_scale_set_name,
            instance_id
        );
        let mut url = url::Url::parse(url_str).map_err(deallocate::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(deallocate::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        // POST with an empty body: set an explicit Content-Length of 0.
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(deallocate::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(deallocate::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(deallocate::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(deallocate::Response::Accepted202),
            status_code => {
                let rsp_body = rsp.body();
                Err(deallocate::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for [`deallocate`].
    pub mod deallocate {
        use crate::{models, models::*};
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Gets one VM instance of a VM scale set via HTTP GET to
    /// `.../virtualmachines/{instance_id}`.
    ///
    /// Returns the deserialized VM on 200; any other status becomes
    /// `get::Error::UnexpectedResponse` carrying the raw body.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_scale_set_name: &str,
        instance_id: &str,
        subscription_id: &str,
    ) -> std::result::Result<VirtualMachineScaleSetVm, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_scale_set_name,
            instance_id
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET carries no payload.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualMachineScaleSetVm =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(get::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Error type for [`get`].
    pub mod get {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Updates one VM instance of a VM scale set via HTTP PUT to
    /// `.../virtualmachines/{instance_id}`.
    ///
    /// `parameters` is serialized to JSON as the request body. Distinguishes
    /// 200 from 202 (accepted, long-running) via [`update::Response`]; both
    /// variants carry the VM returned by the service.
    pub async fn update(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_scale_set_name: &str,
        instance_id: &str,
        parameters: &VirtualMachineScaleSetVm,
        subscription_id: &str,
    ) -> std::result::Result<update::Response, update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_scale_set_name,
            instance_id
        );
        let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // Serialize the VM payload as the PUT body.
        let req_body = azure_core::to_json(parameters).map_err(update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualMachineScaleSetVm =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(update::Response::Ok200(rsp_value))
            }
            http::StatusCode::ACCEPTED => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualMachineScaleSetVm =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(update::Response::Accepted202(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(update::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for [`update`].
    pub mod update {
        use crate::{models, models::*};
        #[derive(Debug)]
        pub enum Response {
            Ok200(VirtualMachineScaleSetVm),
            Accepted202(VirtualMachineScaleSetVm),
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Deletes one VM instance of a VM scale set via HTTP DELETE to
    /// `.../virtualmachines/{instance_id}`.
    ///
    /// The service may answer 200, 202 (accepted, long-running) or 204
    /// (nothing to delete); each maps to a [`delete::Response`] variant.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        vm_scale_set_name: &str,
        instance_id: &str,
        subscription_id: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vm_scale_set_name,
            instance_id
        );
        let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(delete::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let rsp_body = rsp.body();
                Err(delete::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for [`delete`].
    pub mod delete {
        use crate::{models, models::*};
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn get_instance_view(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
instance_id: &str,
subscription_id: &str,
) -> std::result::Result<VirtualMachineScaleSetVmInstanceView, get_instance_view::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/instanceView",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name,
instance_id
);
let mut url = url::Url::parse(url_str).map_err(get_instance_view::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_instance_view::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get_instance_view::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_instance_view::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineScaleSetVmInstanceView = serde_json::from_slice(rsp_body)
.map_err(|source| get_instance_view::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get_instance_view::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    /// Error types for the `get_instance_view` operation (the success value is
    /// returned directly, so no `Response` enum is needed here).
    pub mod get_instance_view {
        use crate::{models, models::*};
        /// Failure modes of the request pipeline; each variant's `#[error]`
        /// attribute supplies its `Display` message.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
virtual_machine_scale_set_name: &str,
filter: Option<&str>,
select: Option<&str>,
expand: Option<&str>,
subscription_id: &str,
) -> std::result::Result<VirtualMachineScaleSetVmListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualMachines",
operation_config.base_path(),
subscription_id,
resource_group_name,
virtual_machine_scale_set_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(select) = select {
url.query_pairs_mut().append_pair("$select", select);
}
if let Some(expand) = expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualMachineScaleSetVmListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    /// Error types for the `list` operation (the success value is returned
    /// directly, so no `Response` enum is needed here).
    pub mod list {
        use crate::{models, models::*};
        /// Failure modes of the request pipeline; each variant's `#[error]`
        /// attribute supplies its `Display` message.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn power_off(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
instance_id: &str,
subscription_id: &str,
) -> std::result::Result<power_off::Response, power_off::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/poweroff",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name,
instance_id
);
let mut url = url::Url::parse(url_str).map_err(power_off::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(power_off::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(power_off::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(power_off::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(power_off::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(power_off::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(power_off::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    /// Response and error types for the `power_off` operation.
    pub mod power_off {
        use crate::{models, models::*};
        /// Success statuses the service may answer with.
        #[derive(Debug)]
        pub enum Response {
            /// HTTP 200 (OK).
            Ok200,
            /// HTTP 202 (Accepted).
            Accepted202,
        }
        /// Failure modes of the request pipeline; each variant's `#[error]`
        /// attribute supplies its `Display` message.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn restart(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
instance_id: &str,
subscription_id: &str,
) -> std::result::Result<restart::Response, restart::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/restart",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name,
instance_id
);
let mut url = url::Url::parse(url_str).map_err(restart::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(restart::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(restart::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(restart::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(restart::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(restart::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(restart::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    /// Response and error types for the `restart` operation.
    pub mod restart {
        use crate::{models, models::*};
        /// Success statuses the service may answer with.
        #[derive(Debug)]
        pub enum Response {
            /// HTTP 200 (OK).
            Ok200,
            /// HTTP 202 (Accepted).
            Accepted202,
        }
        /// Failure modes of the request pipeline; each variant's `#[error]`
        /// attribute supplies its `Display` message.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn start(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
instance_id: &str,
subscription_id: &str,
) -> std::result::Result<start::Response, start::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/start",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name,
instance_id
);
let mut url = url::Url::parse(url_str).map_err(start::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(start::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(start::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(start::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(start::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(start::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(start::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    /// Response and error types for the `start` operation.
    pub mod start {
        use crate::{models, models::*};
        /// Success statuses the service may answer with.
        #[derive(Debug)]
        pub enum Response {
            /// HTTP 200 (OK).
            Ok200,
            /// HTTP 202 (Accepted).
            Accepted202,
        }
        /// Failure modes of the request pipeline; each variant's `#[error]`
        /// attribute supplies its `Display` message.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn redeploy(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
instance_id: &str,
subscription_id: &str,
) -> std::result::Result<redeploy::Response, redeploy::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/redeploy",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name,
instance_id
);
let mut url = url::Url::parse(url_str).map_err(redeploy::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(redeploy::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(redeploy::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(redeploy::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(redeploy::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(redeploy::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(redeploy::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    /// Response and error types for the `redeploy` operation.
    pub mod redeploy {
        use crate::{models, models::*};
        /// Success statuses the service may answer with.
        #[derive(Debug)]
        pub enum Response {
            /// HTTP 200 (OK).
            Ok200,
            /// HTTP 202 (Accepted).
            Accepted202,
        }
        /// Failure modes of the request pipeline; each variant's `#[error]`
        /// attribute supplies its `Display` message.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn perform_maintenance(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
instance_id: &str,
subscription_id: &str,
) -> std::result::Result<perform_maintenance::Response, perform_maintenance::Error> {
let http_client = operation_config.http_client();
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/performMaintenance" , operation_config . base_path () , subscription_id , resource_group_name , vm_scale_set_name , instance_id) ;
let mut url = url::Url::parse(url_str).map_err(perform_maintenance::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(perform_maintenance::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(perform_maintenance::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(perform_maintenance::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(perform_maintenance::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(perform_maintenance::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(perform_maintenance::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    /// Response and error types for the `perform_maintenance` operation.
    pub mod perform_maintenance {
        use crate::{models, models::*};
        /// Success statuses the service may answer with.
        #[derive(Debug)]
        pub enum Response {
            /// HTTP 200 (OK).
            Ok200,
            /// HTTP 202 (Accepted).
            Accepted202,
        }
        /// Failure modes of the request pipeline; each variant's `#[error]`
        /// attribute supplies its `Display` message.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn run_command(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
vm_scale_set_name: &str,
instance_id: &str,
parameters: &RunCommandInput,
subscription_id: &str,
) -> std::result::Result<run_command::Response, run_command::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/runCommand",
operation_config.base_path(),
subscription_id,
resource_group_name,
vm_scale_set_name,
instance_id
);
let mut url = url::Url::parse(url_str).map_err(run_command::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(run_command::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(parameters).map_err(run_command::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(run_command::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(run_command::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RunCommandResult =
serde_json::from_slice(rsp_body).map_err(|source| run_command::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(run_command::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(run_command::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(run_command::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    /// Response and error types for the `run_command` operation.
    pub mod run_command {
        use crate::{models, models::*};
        /// Success statuses the service may answer with.
        #[derive(Debug)]
        pub enum Response {
            /// HTTP 200 (OK) with the deserialized command result.
            Ok200(RunCommandResult),
            /// HTTP 202 (Accepted).
            Accepted202,
        }
        /// Failure modes of the request pipeline; each variant's `#[error]`
        /// attribute supplies its `Display` message.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod log_analytics {
use crate::models::*;
pub async fn export_request_rate_by_interval(
operation_config: &crate::OperationConfig,
parameters: &RequestRateByIntervalInput,
location: &str,
subscription_id: &str,
) -> std::result::Result<export_request_rate_by_interval::Response, export_request_rate_by_interval::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/logAnalytics/apiAccess/getRequestRateByInterval",
operation_config.base_path(),
subscription_id,
location
);
let mut url = url::Url::parse(url_str).map_err(export_request_rate_by_interval::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(export_request_rate_by_interval::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(parameters).map_err(export_request_rate_by_interval::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(export_request_rate_by_interval::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(export_request_rate_by_interval::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LogAnalyticsOperationResult = serde_json::from_slice(rsp_body)
.map_err(|source| export_request_rate_by_interval::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(export_request_rate_by_interval::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(export_request_rate_by_interval::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(export_request_rate_by_interval::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod export_request_rate_by_interval {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200(LogAnalyticsOperationResult),
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn export_throttled_requests(
operation_config: &crate::OperationConfig,
parameters: &ThrottledRequestsInput,
location: &str,
subscription_id: &str,
) -> std::result::Result<export_throttled_requests::Response, export_throttled_requests::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/logAnalytics/apiAccess/getThrottledRequests",
operation_config.base_path(),
subscription_id,
location
);
let mut url = url::Url::parse(url_str).map_err(export_throttled_requests::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(export_throttled_requests::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(parameters).map_err(export_throttled_requests::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(export_throttled_requests::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(export_throttled_requests::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LogAnalyticsOperationResult = serde_json::from_slice(rsp_body)
.map_err(|source| export_throttled_requests::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(export_throttled_requests::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(export_throttled_requests::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(export_throttled_requests::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod export_throttled_requests {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200(LogAnalyticsOperationResult),
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod virtual_machine_run_commands {
use crate::models::*;
pub async fn list(
operation_config: &crate::OperationConfig,
location: &str,
subscription_id: &str,
) -> std::result::Result<RunCommandListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/runCommands",
operation_config.base_path(),
subscription_id,
location
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RunCommandListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
    /// Fetches a single run-command document for the given Azure location.
    ///
    /// Sends `GET {base}/subscriptions/{sub}/providers/Microsoft.Compute/locations/{location}/runCommands/{command_id}`
    /// and deserializes a 200 response into `RunCommandDocument`; any other
    /// status is returned as `get::Error::UnexpectedResponse` with the raw body.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        location: &str,
        command_id: &str,
        subscription_id: &str,
    ) -> std::result::Result<RunCommandDocument, get::Error> {
        let http_client = operation_config.http_client();
        // Build the resource URL from the configured base path.
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/runCommands/{}",
            operation_config.base_path(),
            subscription_id,
            location,
            command_id
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured on the client.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET requests carry no body.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        // 200 is the only status handled as success for this operation.
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: RunCommandDocument =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(get::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Error types for the sibling `get` operation above.
    pub mod get {
        use crate::{models, models::*};
        /// One variant per fallible step of building, sending, and decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod disks {
use crate::models::*;
    /// Fetches a single managed disk.
    ///
    /// Sends `GET .../resourceGroups/{rg}/providers/Microsoft.Compute/disks/{disk_name}`
    /// and deserializes a 200 response into `Disk`; any other status becomes
    /// `get::Error::UnexpectedResponse` with the raw body.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        disk_name: &str,
    ) -> std::result::Result<Disk, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/disks/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            disk_name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured on the client.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()))
;
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET requests carry no body.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Disk =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(get::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Error types for the sibling `get` operation above.
    pub mod get {
        use crate::{models, models::*};
        /// One variant per fallible step of building, sending, and decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Creates or updates a managed disk.
    ///
    /// Sends `PUT .../disks/{disk_name}` with `disk` serialized as the JSON body.
    /// A 200 or 202 response is deserialized into `Disk` and returned as the
    /// corresponding `create_or_update::Response` variant; 202 indicates the
    /// operation was accepted and may still be in progress server-side.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        disk_name: &str,
        disk: &Disk,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/disks/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            disk_name
        );
        let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        // Attach a bearer token only when a credential is configured on the client.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create_or_update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // Serialize the disk payload as the JSON request body.
        let req_body = azure_core::to_json(disk).map_err(create_or_update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(create_or_update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Disk = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            http::StatusCode::ACCEPTED => {
                let rsp_body = rsp.body();
                let rsp_value: Disk = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create_or_update::Response::Accepted202(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(create_or_update::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for the sibling `create_or_update` operation above.
    pub mod create_or_update {
        use crate::{models, models::*};
        /// Success responses: 200 (completed) or 202 (accepted), each with a `Disk` body.
        #[derive(Debug)]
        pub enum Response {
            Ok200(Disk),
            Accepted202(Disk),
        }
        /// One variant per fallible step of building, sending, and decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Applies a partial update to a managed disk.
    ///
    /// Sends `PATCH .../disks/{disk_name}` with `disk` (a `DiskUpdate`) as the
    /// JSON body. A 200 or 202 response is deserialized into `Disk` and
    /// returned as the corresponding `update::Response` variant.
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        disk_name: &str,
        disk: &DiskUpdate,
    ) -> std::result::Result<update::Response, update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/disks/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            disk_name
        );
        let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PATCH);
        // Attach a bearer token only when a credential is configured on the client.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // Serialize the patch payload as the JSON request body.
        let req_body = azure_core::to_json(disk).map_err(update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Disk =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(update::Response::Ok200(rsp_value))
            }
            http::StatusCode::ACCEPTED => {
                let rsp_body = rsp.body();
                let rsp_value: Disk =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(update::Response::Accepted202(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(update::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for the sibling `update` operation above.
    pub mod update {
        use crate::{models, models::*};
        /// Success responses: 200 (completed) or 202 (accepted), each with a `Disk` body.
        #[derive(Debug)]
        pub enum Response {
            Ok200(Disk),
            Accepted202(Disk),
        }
        /// One variant per fallible step of building, sending, and decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Deletes a managed disk.
    ///
    /// Sends `DELETE .../disks/{disk_name}`. Success statuses carry no body:
    /// 200, 202 (accepted), or 204 (no content), mapped to the matching
    /// `delete::Response` variant.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        disk_name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/disks/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            disk_name
        );
        let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        // Attach a bearer token only when a credential is configured on the client.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(delete::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version())
;
        // DELETE requests carry no body.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let rsp_body = rsp.body();
                Err(delete::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for the sibling `delete` operation above.
    pub mod delete {
        use crate::{models, models::*};
        /// Success responses: 200, 202 (accepted), or 204 (no content); all body-less.
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        /// One variant per fallible step of building, sending, and decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Lists all managed disks in a resource group.
    ///
    /// Sends `GET .../resourceGroups/{rg}/providers/Microsoft.Compute/disks`
    /// and deserializes a 200 response into `DiskList`.
    pub async fn list_by_resource_group(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
    ) -> std::result::Result<DiskList, list_by_resource_group::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/disks",
            operation_config.base_path(),
            subscription_id,
            resource_group_name
        );
        let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured on the client.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_by_resource_group::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET requests carry no body.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(list_by_resource_group::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_by_resource_group::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: DiskList = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(list_by_resource_group::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Error types for the sibling `list_by_resource_group` operation above.
    pub mod list_by_resource_group {
        use crate::{models, models::*};
        /// One variant per fallible step of building, sending, and decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Lists all managed disks in a subscription.
    ///
    /// Sends `GET .../subscriptions/{sub}/providers/Microsoft.Compute/disks`
    /// and deserializes a 200 response into `DiskList`.
    pub async fn list(operation_config: &crate::OperationConfig, subscription_id: &str) -> std::result::Result<DiskList, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.Compute/disks",
            operation_config.base_path(),
            subscription_id
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured on the client.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET requests carry no body.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: DiskList =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(list::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Error types for the sibling `list` operation above.
    pub mod list {
        use crate::{models, models::*};
        /// One variant per fallible step of building, sending, and decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Requests export/read access to a managed disk.
    ///
    /// Sends `POST .../disks/{disk_name}/beginGetAccess` with
    /// `grant_access_data` as the JSON body. A 200 response carries an
    /// `AccessUri` body; a 202 response is body-less (operation accepted).
    pub async fn grant_access(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        disk_name: &str,
        grant_access_data: &GrantAccessData,
    ) -> std::result::Result<grant_access::Response, grant_access::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/disks/{}/beginGetAccess",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            disk_name
        );
        let mut url = url::Url::parse(url_str).map_err(grant_access::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        // Attach a bearer token only when a credential is configured on the client.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(grant_access::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // Serialize the access request as the JSON request body.
        let req_body = azure_core::to_json(grant_access_data).map_err(grant_access::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(grant_access::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(grant_access::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: AccessUri =
                    serde_json::from_slice(rsp_body).map_err(|source| grant_access::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(grant_access::Response::Ok200(rsp_value))
            }
            http::StatusCode::ACCEPTED => Ok(grant_access::Response::Accepted202),
            status_code => {
                let rsp_body = rsp.body();
                Err(grant_access::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for the sibling `grant_access` operation above.
    pub mod grant_access {
        use crate::{models, models::*};
        /// Success responses: 200 with an `AccessUri` body, or a body-less 202.
        #[derive(Debug)]
        pub enum Response {
            Ok200(AccessUri),
            Accepted202,
        }
        /// One variant per fallible step of building, sending, and decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Revokes previously granted access to a managed disk.
    ///
    /// Sends `POST .../disks/{disk_name}/endGetAccess` with an empty body.
    /// Success statuses are body-less: 200 or 202 (accepted).
    pub async fn revoke_access(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        disk_name: &str,
    ) -> std::result::Result<revoke_access::Response, revoke_access::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/disks/{}/endGetAccess",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            disk_name
        );
        let mut url = url::Url::parse(url_str).map_err(revoke_access::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        // Attach a bearer token only when a credential is configured on the client.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(revoke_access::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        // Explicit Content-Length: 0 — this is a POST with an intentionally empty body.
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(revoke_access::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(revoke_access::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(revoke_access::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(revoke_access::Response::Accepted202),
            status_code => {
                let rsp_body = rsp.body();
                Err(revoke_access::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for the sibling `revoke_access` operation above.
    pub mod revoke_access {
        use crate::{models, models::*};
        /// Success responses: body-less 200 or 202 (accepted).
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
        }
        /// One variant per fallible step of building, sending, and decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod snapshots {
use crate::models::*;
    /// Fetches a single snapshot resource.
    ///
    /// Sends `GET .../resourceGroups/{rg}/providers/Microsoft.Compute/snapshots/{snapshot_name}`
    /// and deserializes a 200 response into `Snapshot`.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        snapshot_name: &str,
    ) -> std::result::Result<Snapshot, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/snapshots/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            snapshot_name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured on the client.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET requests carry no body.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Snapshot =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(get::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Error types for the sibling `get` operation above.
    pub mod get {
        use crate::{models, models::*};
        /// One variant per fallible step of building, sending, and decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Creates or updates a snapshot.
    ///
    /// Sends `PUT .../snapshots/{snapshot_name}` with `snapshot` as the JSON
    /// body. A 200 or 202 response is deserialized into `Snapshot` and
    /// returned as the corresponding `create_or_update::Response` variant.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        snapshot_name: &str,
        snapshot: &Snapshot,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/snapshots/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            snapshot_name
        );
        let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        // Attach a bearer token only when a credential is configured on the client.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create_or_update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // Serialize the snapshot payload as the JSON request body.
        let req_body = azure_core::to_json(snapshot).map_err(create_or_update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(create_or_update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Snapshot = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            http::StatusCode::ACCEPTED => {
                let rsp_body = rsp.body();
                let rsp_value: Snapshot = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create_or_update::Response::Accepted202(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(create_or_update::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for the sibling `create_or_update` operation above.
    pub mod create_or_update {
        use crate::{models, models::*};
        /// Success responses: 200 (completed) or 202 (accepted), each with a `Snapshot` body.
        #[derive(Debug)]
        pub enum Response {
            Ok200(Snapshot),
            Accepted202(Snapshot),
        }
        /// One variant per fallible step of building, sending, and decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Applies a partial update to a snapshot.
    ///
    /// Sends `PATCH .../snapshots/{snapshot_name}` with `snapshot` (a
    /// `SnapshotUpdate`) as the JSON body. A 200 or 202 response is
    /// deserialized into `Snapshot` and returned as the corresponding
    /// `update::Response` variant.
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        snapshot_name: &str,
        snapshot: &SnapshotUpdate,
    ) -> std::result::Result<update::Response, update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/snapshots/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            snapshot_name
        );
        let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PATCH);
        // Attach a bearer token only when a credential is configured on the client.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // Serialize the patch payload as the JSON request body.
        let req_body = azure_core::to_json(snapshot).map_err(update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Snapshot =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(update::Response::Ok200(rsp_value))
            }
            http::StatusCode::ACCEPTED => {
                let rsp_body = rsp.body();
                let rsp_value: Snapshot =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(update::Response::Accepted202(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(update::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for the sibling `update` operation above.
    pub mod update {
        use crate::{models, models::*};
        /// Success responses: 200 (completed) or 202 (accepted), each with a `Snapshot` body.
        #[derive(Debug)]
        pub enum Response {
            Ok200(Snapshot),
            Accepted202(Snapshot),
        }
        /// One variant per fallible step of building, sending, and decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Deletes a snapshot.
    ///
    /// Sends `DELETE .../snapshots/{snapshot_name}`. Success statuses carry no
    /// body: 200, 202 (accepted), or 204 (no content), mapped to the matching
    /// `delete::Response` variant.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        snapshot_name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/snapshots/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            snapshot_name
        );
        let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        // Attach a bearer token only when a credential is configured on the client.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(delete::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // DELETE requests carry no body.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let rsp_body = rsp.body();
                Err(delete::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Response and error types for the sibling `delete` operation above.
    pub mod delete {
        use crate::{models, models::*};
        /// Success responses: 200, 202 (accepted), or 204 (no content); all body-less.
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        /// One variant per fallible step of building, sending, and decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Lists all snapshots in a resource group.
    ///
    /// Sends `GET .../resourceGroups/{rg}/providers/Microsoft.Compute/snapshots`
    /// and deserializes a 200 response into `SnapshotList`.
    pub async fn list_by_resource_group(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
    ) -> std::result::Result<SnapshotList, list_by_resource_group::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/snapshots",
            operation_config.base_path(),
            subscription_id,
            resource_group_name
        );
        let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured on the client.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_by_resource_group::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET requests carry no body.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str())
;
        let req = req_builder
            .body(req_body)
            .map_err(list_by_resource_group::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_by_resource_group::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: SnapshotList = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(list_by_resource_group::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
pub mod list_by_resource_group {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Lists every snapshot in the subscription (no resource-group filter).
/// Same request/response shape as [`list_by_resource_group`], only the URL differs.
pub async fn list(operation_config: &crate::OperationConfig, subscription_id: &str) -> std::result::Result<SnapshotList, list::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/providers/Microsoft.Compute/snapshots",
        operation_config.base_path(),
        subscription_id
    );
    let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::GET);
    // Authentication is optional: only attach a bearer token when configured.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(list::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
    let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: SnapshotList =
                serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        status_code => {
            let rsp_body = rsp.body();
            Err(list::Error::UnexpectedResponse {
                status_code,
                body: rsp_body.clone(),
            })
        }
    }
}
/// Error type for [`list`] (generated-style module).
pub mod list {
    use crate::{models, models::*};
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("Unexpected HTTP status code {}", status_code)]
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// Requests SAS access to a snapshot (`beginGetAccess`). POSTs the serialized
/// `grant_access_data`; 200 carries an `AccessUri`, 202 means the operation was
/// accepted and is still running.
pub async fn grant_access(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    snapshot_name: &str,
    grant_access_data: &GrantAccessData,
) -> std::result::Result<grant_access::Response, grant_access::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/snapshots/{}/beginGetAccess",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        snapshot_name
    );
    let mut url = url::Url::parse(url_str).map_err(grant_access::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::POST);
    // Authentication is optional: only attach a bearer token when configured.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(grant_access::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    // Unlike the list/delete calls, this one carries a JSON request body.
    let req_body = azure_core::to_json(grant_access_data).map_err(grant_access::Error::SerializeError)?;
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(grant_access::Error::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .map_err(grant_access::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: AccessUri =
                serde_json::from_slice(rsp_body).map_err(|source| grant_access::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(grant_access::Response::Ok200(rsp_value))
        }
        http::StatusCode::ACCEPTED => Ok(grant_access::Response::Accepted202),
        status_code => {
            let rsp_body = rsp.body();
            Err(grant_access::Error::UnexpectedResponse {
                status_code,
                body: rsp_body.clone(),
            })
        }
    }
}
/// Response and error types for [`grant_access`].
pub mod grant_access {
    use crate::{models, models::*};
    #[derive(Debug)]
    pub enum Response {
        Ok200(AccessUri),
        Accepted202,
    }
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("Unexpected HTTP status code {}", status_code)]
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// Revokes SAS access previously granted on a snapshot (`endGetAccess`).
/// Empty-body POST; 200 = done, 202 = accepted/asynchronous.
pub async fn revoke_access(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    snapshot_name: &str,
) -> std::result::Result<revoke_access::Response, revoke_access::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/snapshots/{}/endGetAccess",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        snapshot_name
    );
    let mut url = url::Url::parse(url_str).map_err(revoke_access::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::POST);
    // Authentication is optional: only attach a bearer token when configured.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(revoke_access::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    // Bodyless POST: an explicit Content-Length of 0 is set for the server.
    req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(revoke_access::Error::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .map_err(revoke_access::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => Ok(revoke_access::Response::Ok200),
        http::StatusCode::ACCEPTED => Ok(revoke_access::Response::Accepted202),
        status_code => {
            let rsp_body = rsp.body();
            Err(revoke_access::Error::UnexpectedResponse {
                status_code,
                body: rsp_body.clone(),
            })
        }
    }
}
/// Response and error types for [`revoke_access`].
pub mod revoke_access {
    use crate::{models, models::*};
    #[derive(Debug)]
    pub enum Response {
        Ok200,
        Accepted202,
    }
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("Unexpected HTTP status code {}", status_code)]
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
}
| 47.832839 | 277 | 0.588954 |
69f68f8e38e66bc43738e468b6eafa1efff8b54c | 959 | //! [POST /_matrix/client/r0/account/3pid/unbind](https://matrix.org/docs/spec/client_server/r0.6.0#post-matrix-client-r0-account-3pid-unbind)
use ruma_api::ruma_api;
use super::ThirdPartyIdRemovalStatus;
use crate::r0::thirdparty::Medium;
// Declarative endpoint definition; the `ruma_api!` macro expands this into
// the Request/Response types and HTTP (de)serialization for POST
// /_matrix/client/r0/account/3pid/unbind.
ruma_api! {
    metadata {
        description: "Unbind a 3PID from a user's account on an identity server.",
        method: POST,
        name: "unbind_3pid",
        path: "/_matrix/client/r0/account/3pid/unbind",
        rate_limited: false,
        requires_authentication: true,
    }
    request {
        /// Identity server to unbind from.
        /// Omitted from the JSON body when `None`.
        #[serde(skip_serializing_if = "Option::is_none")]
        id_server: Option<String>,
        /// Medium of the 3PID to be removed.
        medium: Medium,
        /// Third-party address being removed.
        address: String,
    }
    response {
        /// Result of unbind operation.
        id_server_unbind_result: ThirdPartyIdRemovalStatus,
    }
}
| 28.205882 | 142 | 0.642336 |
d914138a04b700055a84ad941b0e825b7fc3b35c | 280 | fn main(){
let mut x = 17;
println!("X = {}",x);
x = my_func();
println!("X = {}",x);
x = calc(100,100);
println!("X = {}",x);
x = |x| =>{
64
}
println!("X = {}",x);
}
/// Returns the fixed value 9.
fn my_func() -> i32 {
    9
}
/// Returns the product of `x` and `y`.
fn calc(x: i32, y: i32) -> i32 {
    x * y
}
| 11.666667 | 28 | 0.45 |
913fd7f61fa81748b407e3c14de60ec45c94fc82 | 14,345 | use crate::BoundingBox;
use cgmath::prelude::*;
use cgmath::{Matrix3, Rad, Vector3};
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
/// A scalar geometry parameter: either a literal constant or a symbolic name
/// that is resolved against a parameter table when the geometry is evaluated.
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
#[serde(untagged)]
pub enum GeometryParameter {
    /// Fixed value across all instances.
    Constant(f32),
    /// Reference into a parameter table.
    Symbolic(String),
}
impl GeometryParameter {
    /// Resolves this parameter to a concrete value. Symbolic references are
    /// looked up in `symbolic_values` (panics if the key is absent, matching
    /// `BTreeMap` indexing semantics).
    pub fn value(&self, symbolic_values: &BTreeMap<String, f32>) -> f32 {
        match *self {
            Self::Constant(value) => value,
            Self::Symbolic(ref name) => symbolic_values[name],
        }
    }
}
/// A signed-distance-field geometry tree: primitive shapes plus CSG and
/// domain-modifier combinators. Serialized with an external `type` tag in
/// kebab-case (see the serde attribute below).
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
#[serde(tag = "type", rename_all = "kebab-case")]
pub enum Geometry {
    /// Sphere of the given radius, centered at the origin.
    Sphere {
        radius: GeometryParameter,
    },
    /// Axis-aligned ellipsoid with per-axis radii.
    Ellipsoid {
        radius: [GeometryParameter; 3],
    },
    /// Axis-aligned box; `dimensions` are half-extents per axis (see `bounds`).
    Cuboid {
        dimensions: [GeometryParameter; 3],
    },
    /// Y-axis cylinder; `height` is the half-height (see `bounds`).
    Cylinder {
        height: GeometryParameter,
        radius: GeometryParameter,
    },
    /// CSG union of all children.
    Union {
        children: Vec<Geometry>,
    },
    /// CSG intersection of all children.
    Intersection {
        children: Vec<Geometry>,
    },
    /// CSG difference: `lhs` minus `rhs`.
    Subtraction {
        lhs: Box<Geometry>,
        rhs: Box<Geometry>,
    },
    /// Hollows the child into a shell of the given thickness.
    Onion {
        thickness: GeometryParameter,
        child: Box<Geometry>,
    },
    /// Uniformly scales the child by `factor`.
    Scale {
        factor: GeometryParameter,
        child: Box<Geometry>,
    },
    /// Rotates the child by `angle` radians around `axis`.
    Rotate {
        axis: [GeometryParameter; 3],
        angle: GeometryParameter,
        child: Box<Geometry>,
    },
    /// Translates the child by the given offset.
    Translate {
        translation: [GeometryParameter; 3],
        child: Box<Geometry>,
    },
    /// Rounds the child's edges by expanding its surface by `radius`.
    Round {
        radius: GeometryParameter,
        child: Box<Geometry>,
    },
    /// Forces numerical (rather than analytic) normals for the child.
    ForceNumericalNormals {
        child: Box<Geometry>,
    },
    /// Arbitrary user-supplied distance-field modifier code; `expansion`
    /// pads the child's bounding box to account for the modification.
    CustomModifier {
        code: String,
        expansion: [f32; 3],
        child: Box<Geometry>,
    },
    /// Twists the child's domain by `amount` with the given `step`.
    Twist {
        amount: GeometryParameter,
        step: GeometryParameter,
        child: Box<Geometry>,
    },
}
impl Geometry {
    /// Returns the approximate cost of evaluating the distance field, based on
    /// the arbitrary measure that the evaluation cost of the unit sphere is 1.
    pub fn evaluation_cost(&self) -> f32 {
        match self {
            Self::Sphere { .. } => 1.0,
            Self::Ellipsoid { .. } => 1.0,
            Self::Cuboid { .. } => 1.5,
            Self::Cylinder { .. } => 2.0,
            // Combinators add a small per-child overhead on top of the
            // children's own costs.
            Self::Union { children } => children.iter().map(|x| 0.25 + x.evaluation_cost()).sum(),
            Self::Intersection { children } => {
                children.iter().map(|x| 0.5 + x.evaluation_cost()).sum()
            }
            Self::Subtraction { lhs, rhs } => lhs.evaluation_cost() + rhs.evaluation_cost() + 0.25,
            Self::Onion { child, .. } => child.evaluation_cost() + 0.25,
            Self::Scale { child, .. } => child.evaluation_cost() + 1.0,
            Self::Rotate { child, .. } => child.evaluation_cost() + 2.0,
            Self::Translate { child, .. } => child.evaluation_cost() + 0.25,
            Self::Round { child, .. } => child.evaluation_cost() + 0.25,
            Self::ForceNumericalNormals { child } => child.evaluation_cost(),
            Self::CustomModifier { child, .. } => child.evaluation_cost() + 3.0,
            Self::Twist { child, .. } => child.evaluation_cost() + 1.0,
        }
    }
    /// Returns a bounding box for an instance of this geometry, or panics if
    /// some referenced parameter values are absent from the parameter table.
    pub fn bounding_box(&self, parameters: &BTreeMap<String, f32>) -> BoundingBox {
        self.bounds(parameters).bbox
    }
    /// Recursively computes a bounding box plus the accumulated worst-case
    /// anisotropic scale factor, which is used to pad distance-based
    /// expansions (`Onion`, `Round`) conservatively.
    fn bounds(&self, parameters: &BTreeMap<String, f32>) -> GeometryBounds {
        match self {
            Self::Sphere { radius } => {
                let radius = radius.value(parameters);
                GeometryBounds {
                    bbox: BoundingBox {
                        min: [-radius; 3].into(),
                        max: [radius; 3].into(),
                    },
                    scale_factor: 1.0,
                }
            }
            Self::Ellipsoid { radius } => {
                let radius_x = radius[0].value(parameters);
                let radius_y = radius[1].value(parameters);
                let radius_z = radius[2].value(parameters);
                let max_radius = radius_x.max(radius_y).max(radius_z);
                let min_radius = radius_x.min(radius_y).min(radius_z);
                GeometryBounds {
                    bbox: BoundingBox {
                        min: [-radius_x, -radius_y, -radius_z].into(),
                        max: [radius_x, radius_y, radius_z].into(),
                    },
                    // Anisotropy ratio: worst-case stretch of the distance field.
                    scale_factor: max_radius / min_radius,
                }
            }
            Self::Cuboid { dimensions } => {
                // `dimensions` are half-extents: the box spans [-dim, +dim] per axis.
                let dim_x = dimensions[0].value(parameters);
                let dim_y = dimensions[1].value(parameters);
                let dim_z = dimensions[2].value(parameters);
                GeometryBounds {
                    bbox: BoundingBox {
                        min: [-dim_x, -dim_y, -dim_z].into(),
                        max: [dim_x, dim_y, dim_z].into(),
                    },
                    scale_factor: 1.0,
                }
            }
            Self::Cylinder { height, radius } => {
                // `height` is treated as the half-height along Y.
                let height = height.value(parameters);
                let radius = radius.value(parameters);
                GeometryBounds {
                    bbox: BoundingBox {
                        min: [-radius, -height, -radius].into(),
                        max: [radius, height, radius].into(),
                    },
                    scale_factor: 1.0,
                }
            }
            Self::Union { children } => {
                // Union: grow an empty box over all children; keep the worst
                // scale factor seen.
                let mut bbox = BoundingBox::neg_infinity_bounds();
                let mut scale_factor: f32 = 0.0;
                for child in children {
                    let bounds = child.bounds(parameters);
                    scale_factor = scale_factor.max(bounds.scale_factor);
                    bbox.extend(&bounds.bbox);
                }
                GeometryBounds { bbox, scale_factor }
            }
            Self::Intersection { children } => {
                // Intersection: shrink an infinite box by every child's box.
                let mut bbox = BoundingBox::pos_infinity_bounds();
                let mut scale_factor: f32 = 0.0;
                for child in children {
                    let bounds = child.bounds(parameters);
                    scale_factor = scale_factor.max(bounds.scale_factor);
                    bbox.intersect(&bounds.bbox);
                }
                GeometryBounds { bbox, scale_factor }
            }
            // Subtraction can only remove material, so lhs's bounds suffice.
            Self::Subtraction { lhs, .. } => lhs.bounds(parameters),
            Self::Onion { thickness, child } => {
                let mut bounds = child.bounds(parameters);
                // Scale the padding by the accumulated anisotropy so the box
                // stays conservative under stretched distance fields.
                let thickness = thickness.value(parameters) * bounds.scale_factor;
                bounds.bbox.min.x -= thickness;
                bounds.bbox.min.y -= thickness;
                bounds.bbox.min.z -= thickness;
                bounds.bbox.max.x += thickness;
                bounds.bbox.max.y += thickness;
                bounds.bbox.max.z += thickness;
                bounds
            }
            Self::Scale { factor, child } => {
                let mut bounds = child.bounds(parameters);
                bounds.bbox.min *= factor.value(parameters);
                bounds.bbox.max *= factor.value(parameters);
                bounds
            }
            Self::Rotate { axis, angle, child } => {
                let rotation_axis: Vector3<f32> = [
                    axis[0].value(parameters),
                    axis[1].value(parameters),
                    axis[2].value(parameters),
                ]
                .into();
                let rotation = Matrix3::from_axis_angle(
                    rotation_axis.normalize(),
                    Rad(angle.value(parameters)),
                );
                let mut bounds = child.bounds(parameters);
                bounds.bbox = bounds.bbox.transform(rotation);
                bounds
            }
            Self::Translate { translation, child } => {
                let mut bounds = child.bounds(parameters);
                bounds.bbox.min.x += translation[0].value(parameters);
                bounds.bbox.min.y += translation[1].value(parameters);
                bounds.bbox.min.z += translation[2].value(parameters);
                bounds.bbox.max.x += translation[0].value(parameters);
                bounds.bbox.max.y += translation[1].value(parameters);
                bounds.bbox.max.z += translation[2].value(parameters);
                bounds
            }
            Self::Round { child, radius } => {
                let mut bounds = child.bounds(parameters);
                // Rounding expands the surface outward; pad by the (scale
                // compensated) radius on every side.
                let radius = radius.value(parameters) * bounds.scale_factor;
                bounds.bbox.min.x -= radius;
                bounds.bbox.min.y -= radius;
                bounds.bbox.min.z -= radius;
                bounds.bbox.max.x += radius;
                bounds.bbox.max.y += radius;
                bounds.bbox.max.z += radius;
                bounds
            }
            // Normal-mode changes don't affect extents.
            Self::ForceNumericalNormals { child } => child.bounds(parameters),
            Self::CustomModifier {
                child, expansion, ..
            } => {
                // User-declared padding for arbitrary modifier code.
                let mut bounds = child.bounds(parameters);
                bounds.bbox.min.x -= expansion[0];
                bounds.bbox.min.y -= expansion[1];
                bounds.bbox.min.z -= expansion[2];
                bounds.bbox.max.x += expansion[0];
                bounds.bbox.max.y += expansion[1];
                bounds.bbox.max.z += expansion[2];
                bounds
            }
            Self::Twist { child, .. } => {
                // Twisting can swing corners outward; pad by half the child's
                // extent per axis as a conservative envelope.
                // NOTE(review): this padding is heuristic — confirm it is
                // sufficient for large twist amounts.
                let mut bounds = child.bounds(parameters);
                let dx = bounds.bbox.max.x - bounds.bbox.min.x;
                let dy = bounds.bbox.max.y - bounds.bbox.min.y;
                let dz = bounds.bbox.max.z - bounds.bbox.min.z;
                bounds.bbox.min.x -= dx * 0.5;
                bounds.bbox.min.y -= dy * 0.5;
                bounds.bbox.min.z -= dz * 0.5;
                bounds.bbox.max.x += dx * 0.5;
                bounds.bbox.max.y += dy * 0.5;
                bounds.bbox.max.z += dz * 0.5;
                bounds
            }
        }
    }
    /// Returns a vector of all symbolic parameters found in this geometry in
    /// a deterministic order, representing the approximate evaluation order.
    pub fn symbolic_parameters(&self) -> Vec<&str> {
        let mut parameters = vec![];
        self.symbolic_parameters_recursive(&mut parameters);
        parameters
    }
    /// Depth-first walk that appends every symbolic parameter name to
    /// `parameters`; duplicates are kept (callers see each use site).
    fn symbolic_parameters_recursive<'a>(&'a self, parameters: &mut Vec<&'a str>) {
        match self {
            Self::Sphere { radius } => {
                Self::record_parameter(parameters, radius);
            }
            Self::Ellipsoid { radius } => {
                Self::record_parameter(parameters, &radius[0]);
                Self::record_parameter(parameters, &radius[1]);
                Self::record_parameter(parameters, &radius[2]);
            }
            Self::Cuboid { dimensions } => {
                Self::record_parameter(parameters, &dimensions[0]);
                Self::record_parameter(parameters, &dimensions[1]);
                Self::record_parameter(parameters, &dimensions[2]);
            }
            Self::Cylinder { height, radius } => {
                Self::record_parameter(parameters, height);
                Self::record_parameter(parameters, radius);
            }
            Self::Union { children } | Self::Intersection { children } => {
                for child in children {
                    child.symbolic_parameters_recursive(parameters);
                }
            }
            Self::Subtraction { lhs, rhs } => {
                lhs.symbolic_parameters_recursive(parameters);
                rhs.symbolic_parameters_recursive(parameters);
            }
            Self::Onion { thickness, child } => {
                Self::record_parameter(parameters, thickness);
                child.symbolic_parameters_recursive(parameters);
            }
            Self::Scale { factor, child } => {
                Self::record_parameter(parameters, factor);
                child.symbolic_parameters_recursive(parameters);
            }
            Self::Rotate { axis, angle, child } => {
                Self::record_parameter(parameters, &axis[0]);
                Self::record_parameter(parameters, &axis[1]);
                Self::record_parameter(parameters, &axis[2]);
                Self::record_parameter(parameters, angle);
                child.symbolic_parameters_recursive(parameters);
            }
            Self::Translate { translation, child } => {
                Self::record_parameter(parameters, &translation[0]);
                Self::record_parameter(parameters, &translation[1]);
                Self::record_parameter(parameters, &translation[2]);
                child.symbolic_parameters_recursive(parameters);
            }
            Self::Round { child, radius } => {
                Self::record_parameter(parameters, radius);
                child.symbolic_parameters_recursive(parameters);
            }
            Self::ForceNumericalNormals { child } => {
                child.symbolic_parameters_recursive(parameters);
            }
            Self::CustomModifier { child, .. } => {
                child.symbolic_parameters_recursive(parameters);
            }
            Self::Twist {
                child,
                amount,
                step,
            } => {
                Self::record_parameter(parameters, amount);
                Self::record_parameter(parameters, step);
                child.symbolic_parameters_recursive(parameters);
            }
        }
    }
    /// Pushes the symbol name when `parameter` is symbolic; constants are skipped.
    fn record_parameter<'a>(parameters: &mut Vec<&'a str>, parameter: &'a GeometryParameter) {
        if let GeometryParameter::Symbolic(symbol) = parameter {
            parameters.push(symbol);
        }
    }
}
/// Bounding box plus the accumulated worst-case scale factor for a subtree.
#[derive(Debug)]
struct GeometryBounds {
    // Axis-aligned bounds of the subtree.
    bbox: BoundingBox,
    // Worst-case stretch of the distance field (>= 1 for exact fields);
    // used to pad distance-based expansions conservatively.
    scale_factor: f32,
}
| 35.68408 | 99 | 0.508888 |
9c40b7fe22c93e9e9c67d2f5d57ef10401539c3b | 2,649 | use assert_cmd::prelude::*;
use predicates::prelude::*;
use std::process::Command;
// Directory of fixture files shared by every integration test below.
const DATA_PATH: &'static str = "tests/data";
#[test]
fn no_jobs_valid_dir_lines() -> Result<(), Box<dyn std::error::Error>> {
    // Default invocation (no -j flag) reports 45 lines for the data directory.
    let mut xloc = Command::cargo_bin("xloc")?;
    let run = xloc.arg(DATA_PATH).assert();
    run.success().stdout(predicate::str::contains("45"));
    Ok(())
}
#[test]
fn with_jobs_valid_dir_lines() -> Result<(), Box<dyn std::error::Error>> {
    // Same line count when explicitly running with two jobs.
    let mut xloc = Command::cargo_bin("xloc")?;
    let run = xloc.arg("-j2").arg(DATA_PATH).assert();
    run.success().stdout(predicate::str::contains("45"));
    Ok(())
}
#[test]
fn no_jobs_valid_dir_words() -> Result<(), Box<dyn std::error::Error>> {
    // `-w` switches to word counting: the data directory holds 120 words.
    let mut xloc = Command::cargo_bin("xloc")?;
    let run = xloc.arg("-w").arg(DATA_PATH).assert();
    run.success().stdout(predicate::str::contains("120"));
    Ok(())
}
#[test]
fn with_jobs_valid_dir_words() -> Result<(), Box<dyn std::error::Error>> {
    // Word counting with two jobs yields the same total.
    let mut xloc = Command::cargo_bin("xloc")?;
    let run = xloc.arg("-wj2").arg(DATA_PATH).assert();
    run.success().stdout(predicate::str::contains("120"));
    Ok(())
}
#[test]
fn no_jobs_multiple_path_args() -> Result<(), Box<dyn std::error::Error>> {
    // Several explicit file paths are summed into one total (45 lines).
    let mut xloc = Command::cargo_bin("xloc")?;
    for file in &["tests/data/data.rs", "tests/data/data.py", "tests/data/data.txt"] {
        xloc.arg(file);
    }
    xloc.assert().success().stdout(predicate::str::contains("45"));
    Ok(())
}
#[test]
fn no_jobs_invalid_path() -> Result<(), Box<dyn std::error::Error>> {
    // A nonexistent path fails with an OS "not found" message; the wording
    // differs between Unix and Windows, so accept either.
    let not_found = predicate::str::contains("No such file or directory")
        .or(predicate::str::contains("cannot find the path specified"));
    let mut xloc = Command::cargo_bin("xloc")?;
    xloc.arg("fake_dir").assert().failure().stdout(not_found);
    Ok(())
}
#[test]
fn invalid_flag() -> Result<(), Box<dyn std::error::Error>> {
    // An unknown long flag is rejected by the argument parser on stderr.
    let mut xloc = Command::cargo_bin("xloc")?;
    xloc.arg("--xxxyyyzzz").arg(DATA_PATH);
    xloc.assert().failure().stderr(predicate::str::contains(
        "Found argument '--xxxyyyzzz' which wasn't expected",
    ));
    Ok(())
}
#[test]
fn get_help() -> Result<(), Box<dyn std::error::Error>> {
    // Long then short help flag; args accumulate on the same Command, exactly
    // as in the original test (second run is `xloc --help -h`).
    let mut xloc = Command::cargo_bin("xloc")?;
    for flag in &["--help", "-h"] {
        xloc.arg(flag).assert().success();
    }
    Ok(())
}
#[test]
fn get_version() -> Result<(), Box<dyn std::error::Error>> {
    // Long then short version flag; args accumulate on the same Command,
    // exactly as in the original test (second run is `xloc --version -V`).
    let mut xloc = Command::cargo_bin("xloc")?;
    for flag in &["--version", "-V"] {
        xloc.arg(flag).assert().success();
    }
    Ok(())
}
| 22.449153 | 76 | 0.558701 |
21540994fba422b7e76ed39c8c817f95dc2f621e | 588 | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use failure::Error;
use fuchsia_async as fasync;
pub mod context;
pub mod invocation;
pub mod leave_command;
pub mod provision_command;
pub mod status_command;
use context::*;
use invocation::*;
#[fasync::run_singlethreaded]
async fn main() -> Result<(), Error> {
    // Build a fresh context, parse the command-line invocation with argh,
    // then execute the requested subcommand against the context.
    let mut context = LowpanCtlContext::new();
    let invocation: LowpanCtlInvocation = argh::from_env();
    invocation.exec(&mut context).await
}
| 24.5 | 73 | 0.734694 |
5b7110866ce69155a823d2460eeb59160a813d4b | 6,185 | // Copyright 2021 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
use super::{
cluster::{ClusterBuilder, ClusterHandle},
listener::{ListenerBuilder, ListenerHandle},
websocket::WsTx,
*,
};
pub(crate) use crate::cql::{CqlBuilder, PasswordAuth};
use anyhow::{anyhow, bail};
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
net::SocketAddr,
ops::{Deref, DerefMut},
};
mod event_loop;
mod init;
mod starter;
mod terminating;
/// Application scope trait: any launcher handle that can carry a
/// `ScyllaBuilder` qualifies (blanket impl below).
pub trait ScyllaScope: LauncherSender<ScyllaBuilder<Self>> {}
impl<H: LauncherSender<ScyllaBuilder<H>>> ScyllaScope for H {}
// Scylla builder: the `builder!` macro generates the builder struct with
// optional setters for each field listed here.
builder!(
    #[derive(Clone)]
    ScyllaBuilder<H> {
        listen_address: String,
        reporter_count: u8,
        thread_count: usize,
        local_dc: String,
        buffer_size: usize,
        recv_buffer_size: u32,
        send_buffer_size: u32,
        listener_handle: ListenerHandle,
        cluster_handle: ClusterHandle,
        authenticator: PasswordAuth
    });
#[derive(Deserialize, Serialize)]
/// Interface used to dynamically configure the scylla application at runtime.
pub enum ScyllaThrough {
    /// Shutdown json to gracefully shutdown scylla app
    Shutdown,
    /// Alter the scylla topology
    Topology(Topology),
}
/// Identifies who created a handle; cloned handles are always `Other`
/// (see the `Clone` impl below).
#[allow(missing_docs)]
#[repr(u8)]
#[derive(PartialEq)]
pub enum Caller {
    Launcher = 0,
    Other = 1,
}
/// ScyllaHandle to be passed to the children (Listener and Cluster)
pub struct ScyllaHandle<H: ScyllaScope> {
    // Who owns this handle; influences shutdown behavior.
    pub(crate) caller: Caller,
    // Sender half of the scylla actor's event channel.
    tx: tokio::sync::mpsc::UnboundedSender<ScyllaEvent<H::AppsEvents>>,
}
/// ScyllaInbox used to receive events sent through a `ScyllaHandle`.
pub struct ScyllaInbox<H: ScyllaScope> {
    // Receiver half of the scylla actor's event channel.
    rx: tokio::sync::mpsc::UnboundedReceiver<ScyllaEvent<H::AppsEvents>>,
}
impl<H: ScyllaScope> Clone for ScyllaHandle<H> {
    /// Clones the channel sender. The clone is always attributed to
    /// `Caller::Other`, regardless of the original handle's caller.
    fn clone(&self) -> Self {
        Self {
            caller: Caller::Other,
            tx: self.tx.clone(),
        }
    }
}
/// Application state for the scylla actor.
pub struct Scylla<H: ScyllaScope> {
    // Service record (name/status) reported upward.
    service: Service,
    // Handle to the listener child, taken during init.
    listener_handle: Option<ListenerHandle>,
    // Handle to the cluster child, if already built.
    cluster_handle: Option<ClusterHandle>,
    // Connected websocket senders keyed by peer identity.
    websockets: HashMap<String, WsTx>,
    // Our own event-channel sender, handed out to children.
    handle: Option<ScyllaHandle<H>>,
    // Our event-channel receiver.
    inbox: ScyllaInbox<H>,
}
/// SubEvent type indicating which child a status update came from.
pub enum ScyllaChild {
    /// Used by Listener to keep scylla up to date with its service
    Listener(Service),
    /// Used by Cluster to keep scylla up to date with its service
    Cluster(Service),
    /// Used by Websocket to keep scylla up to date with its service
    Websocket(Service, Option<WsTx>),
}
/// Event type of the Scylla Application
pub enum ScyllaEvent<T> {
    /// It's the passthrough event, which the scylla application will receive from
    Passthrough(T),
    /// Used by scylla children to push their service
    Children(ScyllaChild),
    /// Used by cluster to inform scylla in order to inform the sockets with the result of topology events
    Result(SocketMsg<Result<Topology, Topology>>),
    /// Abort the scylla app, sent by launcher
    Abort,
}
#[derive(Deserialize, Serialize, Debug)]
/// Topology event
pub enum Topology {
    /// AddNode json to add new scylla node
    AddNode(SocketAddr),
    /// RemoveNode json to remove an existing scylla node
    RemoveNode(SocketAddr),
    /// BuildRing json to re/build the cluster topology,
    /// Current limitation: for now the admin supposed to define uniform replication factor among all DataCenter and
    /// all keyspaces
    BuildRing(u8),
}
#[derive(Deserialize, Serialize)]
/// Indicates which app this message is for
pub enum SocketMsg<T> {
    /// Message for Scylla app
    Scylla(T),
}
/// Marker implementation of the AppBuilder trait for the scylla builder.
impl<H: ScyllaScope> AppBuilder<H> for ScyllaBuilder<H> {}
/// Declares the passthrough message type the launcher routes to this app.
impl<H: ScyllaScope> ThroughType for ScyllaBuilder<H> {
    type Through = ScyllaThrough;
}
/// Builds the scylla actor state from the builder fields.
impl<H: ScyllaScope> Builder for ScyllaBuilder<H> {
    type State = Scylla<H>;
    fn build(self) -> Self::State {
        // Wire up the actor's event channel; the sender is stored so it can
        // be handed to children, the receiver becomes the inbox.
        let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
        let handle = Some(ScyllaHandle {
            caller: Caller::Other,
            tx,
        });
        let inbox = ScyllaInbox { rx };
        Scylla::<H> {
            service: Service::new(),
            // Panics if the builder was not given a listener handle.
            listener_handle: Some(self.listener_handle.expect("Expected Listener handle")),
            cluster_handle: self.cluster_handle,
            websockets: HashMap::new(),
            handle,
            inbox,
        }
        .set_name()
    }
}
// TODO integrate well with other services;
/// Passthrough functionality: all hooks are currently no-ops.
impl<H: ScyllaScope> Passthrough<ScyllaThrough> for ScyllaHandle<H> {
    fn launcher_status_change(&mut self, _service: &Service) {}
    fn app_status_change(&mut self, _service: &Service) {}
    fn passthrough(&mut self, _event: ScyllaThrough, _from_app_name: String) {}
    fn service(&mut self, _service: &Service) {}
}
/// Shutdown: injects a serialized `Scylla::Shutdown` passthrough event into
/// the app's own inbox, and additionally aborts when the launcher asks.
impl<H: ScyllaScope> Shutdown for ScyllaHandle<H> {
    fn shutdown(self) -> Option<Self>
    where
        Self: Sized,
    {
        // Deserialize the shutdown passthrough via the generic AppsEvents
        // type; the JSON literal is assumed valid, hence the unwrap.
        let scylla_shutdown: H::AppsEvents = serde_json::from_str("{\"Scylla\": \"Shutdown\"}").unwrap();
        let _ = self.send(ScyllaEvent::Passthrough(scylla_shutdown));
        // if the caller is launcher we abort scylla app. better solution will be implemented in future
        if self.caller == Caller::Launcher {
            let _ = self.send(ScyllaEvent::Abort);
        }
        None
    }
}
// Deref to the underlying channel sender so `handle.send(...)` works directly.
impl<H: ScyllaScope> Deref for ScyllaHandle<H> {
    type Target = tokio::sync::mpsc::UnboundedSender<ScyllaEvent<H::AppsEvents>>;
    fn deref(&self) -> &Self::Target {
        &self.tx
    }
}
impl<H: ScyllaScope> DerefMut for ScyllaHandle<H> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.tx
    }
}
/// Names the scylla application on its service record.
impl<H: ScyllaScope> Name for Scylla<H> {
    /// Tags this actor's service record with the fixed name "Scylla".
    fn set_name(mut self) -> Self {
        self.service.update_name(String::from("Scylla"));
        self
    }
    /// Reports the name currently stored on the service record.
    fn get_name(&self) -> String {
        self.service.get_name()
    }
}
| 29.312796 | 116 | 0.664349 |
8a9ae505737edb79f77f12d9e64ae56434ac9dd1 | 5,381 | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use crate::Device;
use crate::DeviceTool;
use crate::Display;
use crate::SeatCapabilities;
use glib::object::ObjectType as ObjectType_;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
// GObject wrapper: generates the refcounted `Seat` type backed by the
// C `GdkSeat` object (generated binding — do not hand-edit the logic).
glib::glib_wrapper! {
    pub struct Seat(Object<ffi::GdkSeat>);
    match fn {
        get_type => || ffi::gdk_seat_get_type(),
    }
}
impl Seat {
    /// Returns the capability flags of this seat.
    #[doc(alias = "gdk_seat_get_capabilities")]
    pub fn get_capabilities(&self) -> SeatCapabilities {
        unsafe { from_glib(ffi::gdk_seat_get_capabilities(self.to_glib_none().0)) }
    }
    /// Returns the devices on this seat matching `capabilities`.
    #[doc(alias = "gdk_seat_get_devices")]
    pub fn get_devices(&self, capabilities: SeatCapabilities) -> Vec<Device> {
        unsafe {
            FromGlibPtrContainer::from_glib_container(ffi::gdk_seat_get_devices(
                self.to_glib_none().0,
                capabilities.to_glib(),
            ))
        }
    }
    /// Returns the display this seat belongs to.
    #[doc(alias = "gdk_seat_get_display")]
    pub fn get_display(&self) -> Option<Display> {
        unsafe { from_glib_none(ffi::gdk_seat_get_display(self.to_glib_none().0)) }
    }
    /// Returns the logical keyboard device, if any.
    #[doc(alias = "gdk_seat_get_keyboard")]
    pub fn get_keyboard(&self) -> Option<Device> {
        unsafe { from_glib_none(ffi::gdk_seat_get_keyboard(self.to_glib_none().0)) }
    }
    /// Returns the logical pointer device, if any.
    #[doc(alias = "gdk_seat_get_pointer")]
    pub fn get_pointer(&self) -> Option<Device> {
        unsafe { from_glib_none(ffi::gdk_seat_get_pointer(self.to_glib_none().0)) }
    }
    /// Returns the device tools known to this seat.
    #[doc(alias = "gdk_seat_get_tools")]
    pub fn get_tools(&self) -> Vec<DeviceTool> {
        unsafe {
            FromGlibPtrContainer::from_glib_container(ffi::gdk_seat_get_tools(
                self.to_glib_none().0,
            ))
        }
    }
    /// Connects a handler to the `device-added` signal.
    /// The boxed closure is passed to C as user data and invoked via the
    /// trampoline below (generated binding — do not hand-edit the logic).
    pub fn connect_device_added<F: Fn(&Seat, &Device) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn device_added_trampoline<F: Fn(&Seat, &Device) + 'static>(
            this: *mut ffi::GdkSeat,
            device: *mut ffi::GdkDevice,
            f: glib::ffi::gpointer,
        ) {
            let f: &F = &*(f as *const F);
            f(&from_glib_borrow(this), &from_glib_borrow(device))
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"device-added\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    device_added_trampoline::<F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
    /// Connects a handler to the `device-removed` signal.
    pub fn connect_device_removed<F: Fn(&Seat, &Device) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn device_removed_trampoline<F: Fn(&Seat, &Device) + 'static>(
            this: *mut ffi::GdkSeat,
            device: *mut ffi::GdkDevice,
            f: glib::ffi::gpointer,
        ) {
            let f: &F = &*(f as *const F);
            f(&from_glib_borrow(this), &from_glib_borrow(device))
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"device-removed\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    device_removed_trampoline::<F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
    /// Connects a handler to the `tool-added` signal.
    pub fn connect_tool_added<F: Fn(&Seat, &DeviceTool) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn tool_added_trampoline<F: Fn(&Seat, &DeviceTool) + 'static>(
            this: *mut ffi::GdkSeat,
            tool: *mut ffi::GdkDeviceTool,
            f: glib::ffi::gpointer,
        ) {
            let f: &F = &*(f as *const F);
            f(&from_glib_borrow(this), &from_glib_borrow(tool))
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"tool-added\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    tool_added_trampoline::<F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
pub fn connect_tool_removed<F: Fn(&Seat, &DeviceTool) + 'static>(
&self,
f: F,
) -> SignalHandlerId {
unsafe extern "C" fn tool_removed_trampoline<F: Fn(&Seat, &DeviceTool) + 'static>(
this: *mut ffi::GdkSeat,
tool: *mut ffi::GdkDeviceTool,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&from_glib_borrow(this), &from_glib_borrow(tool))
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"tool-removed\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
tool_removed_trampoline::<F> as *const (),
)),
Box_::into_raw(f),
)
}
}
}
impl fmt::Display for Seat {
    // Displays the fixed type name, matching the style of other generated wrappers.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Seat")
    }
}
| 33.216049 | 100 | 0.529455 |
e5528f9162346f74933d8dd1ba653e7b901795ae | 80,641 | mod breakpoints;
mod variables;
use crate::prelude::*;
use crate::cancellation;
use crate::dap_session::DAPSession;
use crate::debug_event_listener;
use crate::debug_protocol::*;
use crate::disassembly;
use crate::fsutil::normalize_path;
use crate::handles::{self, HandleTree};
use crate::must_initialize::{Initialized, MustInitialize, NotInitialized};
use crate::platform::{get_fs_path_case, make_case_folder, pipe};
use crate::python::{self, PythonInterface};
use crate::shared::Shared;
use crate::terminal::Terminal;
use breakpoints::BreakpointsState;
use variables::Container;
use std;
use std::borrow::Cow;
use std::cell::RefCell;
use std::collections::HashMap;
use std::env;
use std::ffi::CStr;
use std::fmt::Write;
use std::path::{Path, PathBuf};
use std::rc::Rc;
use std::str;
use std::time;
use futures;
use futures::prelude::*;
use lldb::*;
use serde_json;
use tokio::io::AsyncReadExt;
use tokio::sync::mpsc;
/// State of one debug-adapter session: the LLDB objects being driven, the DAP
/// connection to the client, and all per-session configuration/caches.
pub struct DebugSession {
    // Back-reference to the shared wrapper of this session; set while `run`'s event loop is alive.
    self_ref: MustInitialize<Shared<DebugSession>>,
    dap_session: DAPSession,
    event_listener: SBListener,
    // Python scripting interface; `None` if interpreter initialization failed.
    python: Option<Box<PythonInterface>>,
    current_cancellation: cancellation::Receiver, // Cancellation associated with request currently being processed
    // Write end of the pipe that receives debugger console output.
    console_pipe: std::fs::File,
    debugger: SBDebugger,
    // Set during launch/attach; `MustInitialize` panics on use before that.
    target: MustInitialize<SBTarget>,
    process: MustInitialize<SBProcess>,
    terminate_on_disconnect: bool,
    no_debug: bool,
    breakpoints: RefCell<BreakpointsState>,
    // Handle tree backing DAP variablesReference ids.
    var_refs: HandleTree<Container>,
    disassembly: MustInitialize<disassembly::AddressSpace>,
    // Caches resolved source-map lookups per path; `None` marks unresolvable paths.
    source_map_cache: RefCell<HashMap<PathBuf, Option<Rc<PathBuf>>>>,
    loaded_modules: Vec<SBModule>,
    relative_path_base: MustInitialize<PathBuf>,
    // LLDB commands to run when the session ends, from launch config `exitCommands`.
    exit_commands: Option<Vec<String>>,
    debuggee_terminal: Option<Terminal>,
    selected_frame_changed: bool,
    last_goto_request: Option<GotoTargetsArguments>,
    client_caps: MustInitialize<InitializeRequestArguments>,
    // User-tunable behavior settings (see `update_adapter_settings`).
    default_expr_type: Expressions,
    global_format: Format,
    show_disassembly: ShowDisassembly,
    deref_pointers: bool,
    console_mode: ConsoleMode,
    suppress_missing_files: bool,
    evaluate_for_hovers: bool,
    command_completions: bool,
    evaluation_timeout: time::Duration,
    source_languages: Vec<String>,
    terminal_prompt_clear: Option<Vec<String>>,
}
// AsyncResponse is used to "smuggle" futures out of request handlers
// in the few cases when we need to respond asynchronously.
// It implements `Error` so it can travel through the `Result<_, Error>`
// return type of `handle_request_args`; `handle_request` downcasts it back
// and spawns the contained future as a local task.
struct AsyncResponse(pub Box<dyn Future<Output = Result<ResponseBody, Error>> + 'static>);
impl std::error::Error for AsyncResponse {}
impl std::fmt::Debug for AsyncResponse {
    // The boxed future is opaque, so just print the type name.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        f.write_str("AsyncResponse")
    }
}
impl std::fmt::Display for AsyncResponse {
    // Same representation as Debug: the payload has no printable form.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        f.write_str("AsyncResponse")
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////
unsafe impl Send for DebugSession {}
impl DebugSession {
    /// Creates a debug session and returns a `LocalSet` running its main event
    /// loop, which multiplexes client DAP requests and LLDB debug events.
    ///
    /// Setup order matters: the console pipe and Python interpreter are wired
    /// into the debugger before the session struct is built, and forwarding
    /// tasks are spawned before the event loop starts.
    pub fn run(dap_session: DAPSession, settings: AdapterSettings) -> impl Future {
        let debugger = SBDebugger::create(false);
        debugger.set_async_mode(true);
        // Debugger console output is redirected through this pipe to the client.
        let (con_reader, con_writer) = pipe().unwrap();
        log_errors!(debugger.set_output_stream(con_writer.try_clone().unwrap()));
        let current_exe = env::current_exe().unwrap();
        // Python is optional: on failure the session still works, minus visualizers.
        let (python, python_events) = match python::initialize(
            debugger.command_interpreter(),
            current_exe.parent().unwrap(),
            Some(con_writer.try_clone().unwrap()),
        ) {
            Ok((python, events)) => (Some(python), Some(events)),
            Err(err) => {
                error!("Initialize Python interpreter: {}", err);
                (None, None)
            }
        };
        let con_reader = tokio::fs::File::from_std(con_reader);
        let mut debug_session = DebugSession {
            self_ref: NotInitialized,
            dap_session: dap_session,
            event_listener: SBListener::new_with_name("DebugSession"),
            python: python,
            current_cancellation: cancellation::dummy(),
            console_pipe: con_writer,
            debugger: debugger,
            target: NotInitialized,
            process: NotInitialized,
            terminate_on_disconnect: false,
            no_debug: false,
            breakpoints: RefCell::new(BreakpointsState::new()),
            var_refs: HandleTree::new(),
            disassembly: NotInitialized,
            source_map_cache: RefCell::new(HashMap::new()),
            loaded_modules: Vec::new(),
            relative_path_base: NotInitialized,
            exit_commands: None,
            debuggee_terminal: None,
            selected_frame_changed: false,
            last_goto_request: None,
            client_caps: NotInitialized,
            default_expr_type: Expressions::Simple,
            global_format: Format::Default,
            show_disassembly: ShowDisassembly::Auto,
            deref_pointers: true,
            console_mode: ConsoleMode::Commands,
            suppress_missing_files: true,
            evaluate_for_hovers: true,
            command_completions: true,
            evaluation_timeout: time::Duration::from_secs(5),
            source_languages: vec!["cpp".into()],
            terminal_prompt_clear: None,
        };
        DebugSession::pipe_console_events(&debug_session.dap_session, con_reader);
        if let Some(python_events) = python_events {
            DebugSession::pipe_python_events(&debug_session.dap_session, python_events);
        }
        let mut requests_receiver = DebugSession::cancellation_filter(&debug_session.dap_session.clone());
        let mut debug_events_stream = debug_event_listener::start_polling(&debug_session.event_listener);
        debug_session.update_adapter_settings(&settings);
        // The main event loop, where we react to incoming events from different sources.
        let shared_session = Shared::new(debug_session);
        // Give the session a reference to its own shared wrapper so handlers can spawn tasks.
        shared_session.try_map(|s| s.self_ref = Initialized(shared_session.clone())).unwrap();
        let local_set = tokio::task::LocalSet::new();
        local_set.spawn_local(async move {
            loop {
                tokio::select! {
                    // Requests from VSCode
                    request = requests_receiver.recv() => {
                        match request {
                            Some((request, cancellation)) => shared_session.map(
                                |s| s.handle_request(request, cancellation)).await,
                            None => {
                                debug!("End of the requests stream");
                                break;
                            }
                        }
                    }
                    // LLDB events.
                    Some(event) = debug_events_stream.recv() => {
                        shared_session.map( |s| s.handle_debug_event(event)).await;
                    }
                }
            }
            // Drop the self-reference cycle so the session can be freed.
            shared_session.map(|s| s.self_ref = NotInitialized).await;
        });
        local_set
    }
fn pipe_console_events(dap_session: &DAPSession, mut con_reader: tokio::fs::File) {
let dap_session = dap_session.clone();
tokio::spawn(async move {
let mut con_data = [0u8; 1024];
loop {
if let Ok(bytes) = con_reader.read(&mut con_data).await {
let event = EventBody::output(OutputEventBody {
output: String::from_utf8_lossy(&con_data[0..bytes]).into(),
..Default::default()
});
log_errors!(dap_session.send_event(event).await);
}
}
});
}
fn pipe_python_events(dap_session: &DAPSession, mut python_events: mpsc::Receiver<EventBody>) {
let dap_session = dap_session.clone();
tokio::spawn(async move {
while let Some(event) = python_events.recv().await {
log_errors!(dap_session.send_event(event).await);
}
});
}
    /// Handle request cancellations.
    ///
    /// Pairs every incoming request with a cancellation token, fires the token
    /// of the matching pending request when a `cancel` request arrives, and
    /// implicitly cancels in-flight scopes/variables/evaluate requests when an
    /// execution-control request (continue, step, disconnect, ...) is received.
    fn cancellation_filter(dap_session: &DAPSession) -> mpsc::Receiver<(Request, cancellation::Receiver)> {
        use tokio::sync::broadcast::error::RecvError;
        let mut raw_requests_stream = dap_session.subscribe_requests().unwrap();
        let (requests_sender, requests_receiver) = mpsc::channel::<(Request, cancellation::Receiver)>(100);
        // This task pairs incoming requests with a cancellation token, which is activated upon receiving a "cancel" request.
        let filter = async move {
            let mut pending_requests: HashMap<u32, cancellation::Sender> = HashMap::new();
            let mut cancellable_requests: Vec<cancellation::Sender> = Vec::new();
            loop {
                match raw_requests_stream.recv().await {
                    Ok(request) => {
                        let sender = cancellation::Sender::new();
                        let receiver = sender.subscribe();
                        match &request.command {
                            Command::Known(request_args) => match request_args {
                                RequestArguments::cancel(args) => {
                                    info!("Cancellation {:?}", args);
                                    if let Some(id) = args.request_id {
                                        if let Some(sender) = pending_requests.remove(&(id as u32)) {
                                            sender.send();
                                        }
                                    }
                                    continue; // Don't forward to the main event loop.
                                }
                                // Requests that may be canceled.
                                RequestArguments::scopes(_)
                                | RequestArguments::variables(_)
                                | RequestArguments::evaluate(_) => cancellable_requests.push(sender.clone()),
                                // Requests that will cancel the above.
                                RequestArguments::continue_(_)
                                | RequestArguments::pause(_)
                                | RequestArguments::next(_)
                                | RequestArguments::stepIn(_)
                                | RequestArguments::stepOut(_)
                                | RequestArguments::stepBack(_)
                                | RequestArguments::reverseContinue(_)
                                | RequestArguments::terminate(_)
                                | RequestArguments::disconnect(_) => {
                                    for sender in &mut cancellable_requests {
                                        sender.send();
                                    }
                                }
                                _ => (),
                            },
                            _ => (),
                        }
                        pending_requests.insert(request.seq, sender);
                        log_errors!(requests_sender.send((request, receiver)).await);
                        // Clean out entries which don't have any receivers.
                        pending_requests.retain(|_k, v| v.receiver_count() > 0);
                        cancellable_requests.retain(|v| v.receiver_count() > 0);
                    }
                    Err(RecvError::Lagged(count)) => error!("Missed {} messages", count),
                    Err(RecvError::Closed) => break,
                }
            }
        };
        tokio::spawn(filter);
        requests_receiver
    }
    /// Dispatches a single DAP request, honoring prior cancellation, and sends
    /// the response (immediately for sync handlers, via a spawned local task
    /// for handlers that returned an `AsyncResponse`).
    fn handle_request(&mut self, request: Request, cancellation: cancellation::Receiver) {
        let seq = request.seq;
        match request.command {
            Command::Known(args) => {
                if cancellation.is_cancelled() {
                    self.send_response(seq, Err("canceled".into()));
                } else {
                    // Expose the token to the handler for the duration of the
                    // call, then restore the inert dummy token.
                    self.current_cancellation = cancellation;
                    let result = self.handle_request_args(args);
                    self.current_cancellation = cancellation::dummy();
                    match result {
                        // Spawn async responses as tasks
                        Err(err) if err.is::<AsyncResponse>() => {
                            let self_ref = self.self_ref.clone();
                            tokio::task::spawn_local(async move {
                                let fut: std::pin::Pin<Box<_>> = err.downcast::<AsyncResponse>().unwrap().0.into();
                                let result = fut.await;
                                self_ref.map(|s| s.send_response(seq, result)).await;
                            });
                        }
                        // Send synchronous results immediately
                        _ => {
                            self.send_response(seq, result);
                        }
                    }
                }
            }
            Command::Unknown {
                command,
            } => {
                info!("Received an unknown command: {}", command);
                self.send_response(seq, Err("Not implemented.".into()));
            }
        }
    }
    /// Routes a decoded DAP request to its handler and wraps the result into a
    /// `ResponseBody`. While in `noDebug` mode, only session-management
    /// requests (initialize/launch/attach/disconnect/...) are honored.
    #[rustfmt::skip]
    fn handle_request_args(&mut self, arguments: RequestArguments) -> Result<ResponseBody, Error> {
        match arguments {
            RequestArguments::_adapterSettings(args) =>
                self.handle_adapter_settings(args)
                    .map(|_| ResponseBody::_adapterSettings),
            RequestArguments::initialize(args) =>
                self.handle_initialize(args)
                    .map(|r| ResponseBody::initialize(r)),
            RequestArguments::launch(args) =>
                self.handle_launch(args),
            RequestArguments::attach(args) =>
                self.handle_attach(args),
            RequestArguments::configurationDone(_) =>
                self.handle_configuration_done()
                    .map(|_| ResponseBody::configurationDone),
            RequestArguments::disconnect(args) =>
                self.handle_disconnect(args)
                    .map(|_| ResponseBody::disconnect),
            _ => {
                if self.no_debug {
                    bail!("Not supported in noDebug mode.")
                } else {
                    match arguments {
                        RequestArguments::setBreakpoints(args) =>
                            self.handle_set_breakpoints(args)
                                .map(|r| ResponseBody::setBreakpoints(r)),
                        RequestArguments::setFunctionBreakpoints(args) =>
                            self.handle_set_function_breakpoints(args)
                                .map(|r| ResponseBody::setFunctionBreakpoints(r)),
                        RequestArguments::setExceptionBreakpoints(args) =>
                            self.handle_set_exception_breakpoints(args)
                                .map(|_| ResponseBody::setExceptionBreakpoints),
                        RequestArguments::threads(_) =>
                            self.handle_threads()
                                .map(|r| ResponseBody::threads(r)),
                        RequestArguments::stackTrace(args) =>
                            self.handle_stack_trace(args)
                                .map(|r| ResponseBody::stackTrace(r)),
                        RequestArguments::scopes(args) =>
                            self.handle_scopes(args)
                                .map(|r| ResponseBody::scopes(r)),
                        RequestArguments::variables(args) =>
                            self.handle_variables(args)
                                .map(|r| ResponseBody::variables(r)),
                        RequestArguments::evaluate(args) =>
                            self.handle_evaluate(args),
                        RequestArguments::setVariable(args) =>
                            self.handle_set_variable(args)
                                .map(|r| ResponseBody::setVariable(r)),
                        RequestArguments::pause(args) =>
                            self.handle_pause(args)
                                .map(|_| ResponseBody::pause),
                        RequestArguments::continue_(args) =>
                            self.handle_continue(args)
                                .map(|r| ResponseBody::continue_(r)),
                        RequestArguments::next(args) =>
                            self.handle_next(args)
                                .map(|_| ResponseBody::next),
                        RequestArguments::stepIn(args) =>
                            self.handle_step_in(args)
                                .map(|_| ResponseBody::stepIn),
                        RequestArguments::stepOut(args) =>
                            self.handle_step_out(args)
                                .map(|_| ResponseBody::stepOut),
                        RequestArguments::stepBack(args) =>
                            self.handle_step_back(args)
                                .map(|_| ResponseBody::stepBack),
                        RequestArguments::reverseContinue(args) =>
                            self.handle_reverse_continue(args)
                                .map(|_| ResponseBody::reverseContinue),
                        RequestArguments::source(args) =>
                            self.handle_source(args)
                                .map(|r| ResponseBody::source(r)),
                        RequestArguments::completions(args) =>
                            self.handle_completions(args)
                                .map(|r| ResponseBody::completions(r)),
                        RequestArguments::gotoTargets(args) =>
                            self.handle_goto_targets(args)
                                .map(|r| ResponseBody::gotoTargets(r)),
                        RequestArguments::goto(args) =>
                            self.handle_goto(args)
                                .map(|_| ResponseBody::goto),
                        RequestArguments::restartFrame(args) =>
                            self.handle_restart_frame(args)
                                .map(|_| ResponseBody::restartFrame),
                        RequestArguments::dataBreakpointInfo(args) =>
                            self.handle_data_breakpoint_info(args)
                                .map(|r| ResponseBody::dataBreakpointInfo(r)),
                        RequestArguments::setDataBreakpoints(args) =>
                            self.handle_set_data_breakpoints(args)
                                .map(|r| ResponseBody::setDataBreakpoints(r)),
                        RequestArguments::readMemory(args) =>
                            self.handle_read_memory(args)
                                .map(|r| ResponseBody::readMemory(r)),
                        RequestArguments::_symbols(args) =>
                            self.handle_symbols(args)
                                .map(|r| ResponseBody::_symbols(r)),
                        _=> bail!("Not implemented.")
                    }
                }
            }
        }
    }
fn send_response(&self, request_seq: u32, result: Result<ResponseBody, Error>) {
let response = match result {
Ok(body) => Response {
request_seq: request_seq,
success: true,
result: ResponseResult::Success {
body: body,
},
},
Err(err) => {
let message = if let Some(user_err) = err.downcast_ref::<crate::error::UserError>() {
format!("{}", user_err)
} else {
format!("Internal debugger error: {}", err)
};
error!("{}", message);
Response {
request_seq: request_seq,
success: false,
result: ResponseResult::Error {
command: "".into(),
message: message,
show_user: Some(true),
},
}
}
};
log_errors!(self.dap_session.try_send_response(response));
}
    /// Queues a DAP event for delivery to the client; failures are logged, not propagated.
    fn send_event(&self, event_body: EventBody) {
        log_errors!(self.dap_session.try_send_event(event_body));
    }
fn console_message(&self, output: impl std::fmt::Display) {
self.console_message_nonl(format!("{}\n", output));
}
fn console_message_nonl(&self, output: impl std::fmt::Display) {
self.send_event(EventBody::output(OutputEventBody {
output: format!("{}", output),
..Default::default()
}));
}
fn console_error(&self, output: impl std::fmt::Display) {
self.send_event(EventBody::output(OutputEventBody {
output: format!("{}\n", output),
category: Some("stderr".into()),
..Default::default()
}));
}
    /// Handles the DAP `initialize` request: subscribes to all LLDB
    /// target/process/thread event classes and reports adapter capabilities.
    fn handle_initialize(&mut self, args: InitializeRequestArguments) -> Result<Capabilities, Error> {
        self.event_listener.start_listening_for_event_class(&self.debugger, SBTarget::broadcaster_class_name(), !0);
        self.event_listener.start_listening_for_event_class(&self.debugger, SBProcess::broadcaster_class_name(), !0);
        self.event_listener.start_listening_for_event_class(&self.debugger, SBThread::broadcaster_class_name(), !0);
        self.client_caps = Initialized(args);
        Ok(self.make_capabilities())
    }
    /// Builds the capability set advertised in the `initialize` response.
    /// Hover evaluation, command completions, and exception filters are
    /// reported from the current adapter settings.
    fn make_capabilities(&self) -> Capabilities {
        Capabilities {
            supports_configuration_done_request: Some(true),
            supports_function_breakpoints: Some(true),
            supports_conditional_breakpoints: Some(true),
            supports_hit_conditional_breakpoints: Some(true),
            supports_set_variable: Some(true),
            supports_goto_targets_request: Some(true),
            supports_delayed_stack_trace_loading: Some(true),
            support_terminate_debuggee: Some(true),
            supports_log_points: Some(true),
            supports_data_breakpoints: Some(true),
            supports_restart_frame: Some(true),
            supports_cancel_request: Some(true),
            supports_read_memory_request: Some(true),
            supports_evaluate_for_hovers: Some(self.evaluate_for_hovers),
            supports_completions_request: Some(self.command_completions),
            exception_breakpoint_filters: Some(self.get_exception_filters(&self.source_languages)),
            ..Default::default()
        }
    }
fn get_exception_filters(&self, source_langs: &[String]) -> Vec<ExceptionBreakpointsFilter> {
let mut filters = vec![];
if source_langs.iter().any(|x| x == "cpp") {
filters.push(ExceptionBreakpointsFilter {
filter: "cpp_throw".into(),
label: "C++: on throw".into(),
default: Some(true),
..Default::default()
});
filters.push(ExceptionBreakpointsFilter {
filter: "cpp_catch".into(),
label: "C++: on catch".into(),
default: Some(false),
..Default::default()
});
}
if source_langs.iter().any(|x| x == "rust") {
filters.push(ExceptionBreakpointsFilter {
filter: "rust_panic".into(),
label: "Rust: on panic".into(),
default: Some(true),
..Default::default()
});
}
filters
}
    /// Handles the DAP `launch` request. For a regular launch this creates the
    /// target, then responds asynchronously once the terminal is ready and the
    /// client has sent `configurationDone`; "custom" launches are delegated to
    /// `handle_custom_launch`.
    fn handle_launch(&mut self, args: LaunchRequestArguments) -> Result<ResponseBody, Error> {
        self.common_init_session(&args.common)?;
        if let Some(true) = &args.custom {
            self.handle_custom_launch(args)
        } else {
            let program = match &args.program {
                Some(program) => program,
                None => bail!(as_user_error("\"program\" property is required for launch")),
            };
            self.no_debug = args.no_debug.unwrap_or(false);
            self.target = Initialized(self.create_target_from_program(program)?);
            self.disassembly = Initialized(disassembly::AddressSpace::new(&self.target));
            self.send_event(EventBody::initialized);
            let term_fut = self.create_terminal(&args);
            let config_done_fut = self.wait_for_configuration_done();
            let self_ref = self.self_ref.clone();
            let fut = async move {
                // Wait for both the terminal and configurationDone before launching.
                drop(tokio::join!(term_fut, config_done_fut));
                self_ref.map(|s| s.complete_launch(args)).await
            };
            Err(AsyncResponse(Box::new(fut)).into())
        }
    }
    /// Returns a future that resolves once the client sends `configurationDone`.
    /// The subscription is created synchronously, before the future is returned,
    /// so a request arriving between the call and the first poll is not lost.
    fn wait_for_configuration_done(&self) -> impl Future<Output = Result<(), Error>> {
        let result = self.dap_session.subscribe_requests();
        async move {
            let mut receiver = result?;
            while let Ok(request) = receiver.recv().await {
                match request.command {
                    Command::Known(RequestArguments::configurationDone(_)) => {
                        return Ok(());
                    }
                    _ => {}
                }
            }
            bail!("Did not receive configurationDone");
        }
    }
    /// Finishes a regular launch after `configurationDone`: builds the launch
    /// environment/arguments/stdio, runs user pre/post-run commands, and starts
    /// the process (inside the debuggee terminal when one was created).
    fn complete_launch(&mut self, args: LaunchRequestArguments) -> Result<ResponseBody, Error> {
        let mut launch_info = self.target.launch_info();
        let mut launch_env: HashMap<String, String> = HashMap::new();
        // Case-folding makes env var merging case-insensitive where the platform requires it.
        let mut fold_case = make_case_folder();
        let inherit_env = match self.debugger.get_variable("target.inherit-env").string_at_index(0) {
            Some("true") => true,
            _ => false,
        };
        // Init with host environment if `inherit-env` is set.
        if inherit_env {
            for (k, v) in env::vars() {
                launch_env.insert(fold_case(&k), v);
            }
        }
        // Launch-config `env` entries override inherited ones.
        if let Some(ref env) = args.env {
            for (k, v) in env.iter() {
                launch_env.insert(fold_case(k), v.into());
            }
        }
        let launch_env = launch_env.iter().map(|(k, v)| format!("{}={}", k, v)).collect::<Vec<String>>();
        launch_info.set_environment_entries(launch_env.iter().map(|s| s.as_ref()), false);
        if let Some(ref args) = args.args {
            launch_info.set_arguments(args.iter().map(|a| a.as_ref()), false);
        }
        if let Some(ref cwd) = args.cwd {
            launch_info.set_working_directory(Path::new(&cwd));
        }
        if let Some(true) = args.common.stop_on_entry {
            launch_info.set_launch_flags(launch_info.launch_flags() | LaunchFlag::StopAtEntry);
        }
        self.configure_stdio(&args, &mut launch_info)?;
        self.target.set_launch_info(&launch_info);
        // Run user commands (which may modify launch info)
        if let Some(ref commands) = args.common.pre_run_commands {
            self.exec_commands("preRunCommands", commands)?;
        }
        // Grab updated launch info.
        let launch_info = self.target.launch_info();
        // Announce the final launch command line
        let executable = self.target.executable().path().to_string_lossy().into_owned();
        let command_line = launch_info.arguments().fold(executable, |mut args, a| {
            args.push(' ');
            args.push_str(a);
            args
        });
        self.console_message(format!("Launching: {}", command_line));
        #[cfg(target_os = "linux")]
        {
            // The personality() syscall is often restricted inside Docker containers, which causes launch failure with a cryptic error.
            // Test if ASLR can be disabled and turn DisableASLR off if so.
            let flags = launch_info.launch_flags();
            if flags.contains(LaunchFlag::DisableASLR) {
                unsafe {
                    const ADDR_NO_RANDOMIZE: libc::c_ulong = 0x0040000;
                    let previous = libc::personality(0xffffffff) as libc::c_ulong;
                    if libc::personality(previous | ADDR_NO_RANDOMIZE) < 0 {
                        launch_info.set_launch_flags(flags - LaunchFlag::DisableASLR);
                        self.console_error("Could not disable address space layout randomization (ASLR).");
                        self.console_message("(Possibly due to running in a restricted container. \
                            Add \"initCommands\":[\"settings set target.disable-aslr false\"] to the launch configuration \
                            to suppress this warning.)",
                        );
                    }
                    // Restore the original personality in either case.
                    libc::personality(previous);
                }
            }
        }
        launch_info.set_listener(&self.event_listener);
        // When a debuggee terminal exists, launch with stdio attached to it.
        let result = match &self.debuggee_terminal {
            Some(t) => t.attach(|| self.target.launch(&launch_info)),
            None => self.target.launch(&launch_info),
        };
        let process = match result {
            Ok(process) => process,
            Err(err) => {
                let mut msg = err.to_string();
                // An inaccessible cwd is a common launch-failure cause; hint at it.
                if let Some(work_dir) = launch_info.working_directory() {
                    if self.target.platform().get_file_permissions(work_dir) == 0 {
                        #[rustfmt::skip]
                        log_errors!(write!(msg,
                            "\n\nPossible cause: the working directory \"{}\" is missing or inaccessible.",
                            work_dir.display()
                        ));
                    }
                }
                bail!(as_user_error(msg))
            }
        };
        self.console_message(format!("Launched process {}", process.process_id()));
        self.process = Initialized(process);
        self.terminate_on_disconnect = true;
        // LLDB sometimes loses the initial stop event.
        if launch_info.launch_flags().intersects(LaunchFlag::StopAtEntry) {
            self.notify_process_stopped();
        }
        if let Some(commands) = args.common.post_run_commands {
            self.exec_commands("postRunCommands", &commands)?;
        }
        self.exit_commands = args.common.exit_commands;
        Ok(ResponseBody::launch)
    }
    /// Handles a "custom" launch, where the user's `targetCreateCommands`
    /// create the target; completion is deferred until `configurationDone`.
    fn handle_custom_launch(&mut self, args: LaunchRequestArguments) -> Result<ResponseBody, Error> {
        if let Some(commands) = &args.target_create_commands {
            self.exec_commands("targetCreateCommands", &commands)?;
        }
        // The user's commands are expected to have selected a target.
        self.target = Initialized(self.debugger.selected_target());
        self.disassembly = Initialized(disassembly::AddressSpace::new(&self.target));
        self.send_event(EventBody::initialized);
        let self_ref = self.self_ref.clone();
        let fut = async move {
            self_ref.map(|s| s.wait_for_configuration_done()).await.await?;
            self_ref.map(|s| s.complete_custom_launch(args)).await
        };
        Err(AsyncResponse(Box::new(fut)).into())
    }
    /// Finishes a custom launch: runs `processCreateCommands` (falling back to
    /// `preRunCommands`) and adopts the process they created.
    fn complete_custom_launch(&mut self, args: LaunchRequestArguments) -> Result<ResponseBody, Error> {
        if let Some(commands) = args.process_create_commands.as_ref().or(args.common.pre_run_commands.as_ref()) {
            self.exec_commands("processCreateCommands", &commands)?;
        }
        self.process = Initialized(self.target.process());
        self.process.broadcaster().add_listener(&self.event_listener, !0);
        self.terminate_on_disconnect = true;
        // This is susceptible to race conditions, but probably the best we can do.
        if self.process.state().is_stopped() {
            self.notify_process_stopped();
        }
        self.exit_commands = args.common.exit_commands;
        Ok(ResponseBody::launch)
    }
    /// Handles the DAP `attach` request: creates an empty target and defers
    /// the actual attach until the client has sent `configurationDone`.
    fn handle_attach(&mut self, args: AttachRequestArguments) -> Result<ResponseBody, Error> {
        self.common_init_session(&args.common)?;
        if args.program.is_none() && args.pid.is_none() {
            bail!(as_user_error(r#"Either "program" or "pid" is required for attach."#));
        }
        self.target = Initialized(self.debugger.create_target("", None, None, false)?);
        self.disassembly = Initialized(disassembly::AddressSpace::new(&self.target));
        self.send_event(EventBody::initialized);
        let self_ref = self.self_ref.clone();
        let fut = async move {
            self_ref.map(|s| s.wait_for_configuration_done()).await.await?;
            self_ref.map(|s| s.complete_attach(args)).await
        };
        Err(AsyncResponse(Box::new(fut)).into())
    }
    /// Finishes an attach after `configurationDone`: attaches by pid or by
    /// executable path, then either reports a stop or resumes the process.
    fn complete_attach(&mut self, args: AttachRequestArguments) -> Result<ResponseBody, Error> {
        if let Some(ref commands) = args.common.pre_run_commands {
            self.exec_commands("preRunCommands", commands)?;
        }
        let attach_info = SBAttachInfo::new();
        if let Some(pid) = args.pid {
            // The pid may arrive as either a JSON number or a string.
            let pid = match pid {
                Pid::Number(n) => n as ProcessID,
                Pid::String(s) => match s.parse() {
                    Ok(n) => n,
                    Err(_) => bail!(as_user_error("Process id must be a positive integer.")),
                },
            };
            attach_info.set_process_id(pid);
        } else if let Some(program) = args.program {
            attach_info.set_executable(&self.find_executable(&program));
        } else {
            // Guarded against in handle_attach.
            unreachable!()
        }
        attach_info.set_wait_for_launch(args.wait_for.unwrap_or(false), false);
        attach_info.set_ignore_existing(false);
        attach_info.set_listener(&self.event_listener);
        let process = match self.target.attach(&attach_info) {
            Ok(process) => process,
            Err(err) => bail!(as_user_error(err)),
        };
        self.console_message(format!("Attached to process {}", process.process_id()));
        self.process = Initialized(process);
        // Attached (not launched) processes are left running on disconnect.
        self.terminate_on_disconnect = false;
        if args.common.stop_on_entry.unwrap_or(false) {
            self.notify_process_stopped();
        } else {
            log_errors!(self.process.resume());
        }
        if let Some(commands) = args.common.post_run_commands {
            self.exec_commands("postRunCommands", &commands)?;
        }
        self.exit_commands = args.common.exit_commands;
        Ok(ResponseBody::attach)
    }
    /// Creates a target for `program`, retrying with an appended ".exe" on
    /// Windows; failures are mapped to user-facing errors.
    fn create_target_from_program(&self, program: &str) -> Result<SBTarget, Error> {
        match self.debugger.create_target(program, None, None, false) {
            Ok(target) => Ok(target),
            Err(err) => {
                // TODO: use selected platform instead of cfg!(windows)
                if cfg!(windows) && !program.ends_with(".exe") {
                    let program = format!("{}.exe", program);
                    self.debugger.create_target(&program, None, None, false)
                } else {
                    Err(err)
                }
            }
        }
        .map_err(|e| as_user_error(e).into())
    }
fn find_executable<'a>(&self, program: &'a str) -> Cow<'a, str> {
// On Windows, also try program + '.exe'
// TODO: use selected platform instead of cfg!(windows)
if cfg!(windows) {
if !Path::new(program).is_file() {
let program = format!("{}.exe", program);
if Path::new(&program).is_file() {
return program.into();
}
}
}
program.into()
}
    /// Creates the debuggee terminal (integrated/external) requested by the
    /// launch config. Returns a no-op future when no terminal is needed
    /// (internal console, or remote debugging where attaching is impossible).
    fn create_terminal(&mut self, args: &LaunchRequestArguments) -> impl Future {
        if self.target.platform().name() != "host" {
            return future::ready(()).left_future(); // Can't attach to a terminal when remote-debugging.
        }
        // `terminal` takes precedence; fall back to the legacy `console` key.
        let terminal_kind = match args.terminal {
            Some(kind) => kind,
            None => match args.console {
                Some(ConsoleKind::InternalConsole) => TerminalKind::Console,
                Some(ConsoleKind::ExternalTerminal) => TerminalKind::External,
                Some(ConsoleKind::IntegratedTerminal) => TerminalKind::Integrated,
                None => TerminalKind::Integrated,
            },
        };
        let terminal_kind = match terminal_kind {
            TerminalKind::Console => return future::ready(()).left_future(),
            TerminalKind::External => "external",
            TerminalKind::Integrated => "integrated",
        };
        let title = args.common.name.as_deref().unwrap_or("Debug").to_string();
        let fut = Terminal::create(terminal_kind, title, self.terminal_prompt_clear.clone(), self.dap_session.clone());
        let self_ref = self.self_ref.clone();
        async move {
            let result = fut.await;
            // Terminal creation failure is non-fatal: output falls back to the console.
            self_ref
                .map(|s| match result {
                    Ok(terminal) => s.debuggee_terminal = Some(terminal),
                    Err(err) => s.console_error(format!(
                        "Failed to redirect stdio to a terminal. ({})\nDebuggee output will appear here.",
                        err
                    )),
                })
                .await
        }
        .right_future()
    }
    /// Wires debuggee stdin/stdout/stderr (and any extra fds) to files named in
    /// the launch config's `stdio`, defaulting unspecified streams to the
    /// debuggee terminal's devices when a terminal exists.
    fn configure_stdio(&mut self, args: &LaunchRequestArguments, launch_info: &mut SBLaunchInfo) -> Result<(), Error> {
        let mut stdio = match args.stdio {
            None => vec![],
            Some(Either::First(ref stdio)) => vec![Some(stdio.clone())], // A single string
            Some(Either::Second(ref stdio)) => stdio.clone(), // List of strings
        };
        // Pad to at least 3 entries
        while stdio.len() < 3 {
            stdio.push(None)
        }
        if let Some(terminal) = &self.debuggee_terminal {
            for (fd, name) in stdio.iter().enumerate() {
                // Use file name specified in the launch config if available,
                // otherwise use the appropriate terminal device name.
                let name = match (name, fd) {
                    (Some(name), _) => name,
                    (None, 0) => terminal.input_devname(),
                    (None, _) => terminal.output_devname(),
                };
                // fd 0 opens for read, 1/2 for write, extras for read+write.
                let _ = match fd {
                    0 => launch_info.add_open_file_action(fd as i32, name, true, false),
                    1 => launch_info.add_open_file_action(fd as i32, name, false, true),
                    2 => launch_info.add_open_file_action(fd as i32, name, false, true),
                    _ => launch_info.add_open_file_action(fd as i32, name, true, true),
                };
            }
        }
        Ok(())
    }
    // Handle initialization tasks common to both launching and attaching:
    // expression evaluator selection, source map, reverse-debugging capability,
    // relative path base, adapter settings, and `initCommands`.
    fn common_init_session(&mut self, args_common: &CommonLaunchFields) -> Result<(), Error> {
        if let Some(expressions) = args_common.expressions {
            self.default_expr_type = expressions;
        }
        // Without a Python interpreter, only native expressions are possible.
        if let None = self.python {
            match self.default_expr_type {
                Expressions::Simple | Expressions::Python => self.console_error(
                    "Could not initialize Python interpreter - some features will be unavailable (e.g. debug visualizers).",
                ),
                Expressions::Native => (),
            }
            self.default_expr_type = Expressions::Native;
        }
        if let Some(source_map) = &args_common.source_map {
            self.init_source_map(source_map.iter().map(|(k, v)| (k, v.as_ref())));
        }
        // Reverse debugging is opt-in; advertise the extra capability dynamically.
        if let Some(true) = &args_common.reverse_debugging {
            self.send_event(EventBody::capabilities(CapabilitiesEventBody {
                capabilities: Capabilities {
                    supports_step_back: Some(true),
                    ..Default::default()
                },
            }));
        }
        self.relative_path_base = Initialized(match &args_common.relative_path_base {
            Some(base) => base.into(),
            None => env::current_dir()?,
        });
        if let Some(ref settings) = args_common.adapter_settings {
            self.update_adapter_settings_and_caps(settings);
        }
        if let Some(commands) = &args_common.init_commands {
            self.exec_commands("initCommands", &commands)?;
        }
        Ok(())
    }
    /// Runs a sequence of LLDB commands from the launch config, echoing their
    /// output to the console. Stops at the first failing command and returns
    /// its error message as a user-facing error.
    fn exec_commands(&self, script_name: &str, commands: &[String]) -> Result<(), Error> {
        self.console_message(format!("Executing script: {}", script_name));
        let interpreter = self.debugger.command_interpreter();
        let mut result = SBCommandReturnObject::new();
        for command in commands {
            result.clear();
            let ok = interpreter.handle_command(&command, &mut result, false);
            debug!("{} -> {:?}, {:?}", command, ok, result);
            let output = result.output().to_string_lossy().into_owned();
            if !output.is_empty() {
                self.console_message(output);
            }
            if !result.succeeded() {
                let err = result.error().to_string_lossy().into_owned();
                self.console_error(err.clone());
                bail!(as_user_error(err))
            }
        }
        Ok(())
    }
    // Builds the value for LLDB's `target.source-map` setting from
    // (remote, local) path pairs. A missing local path maps to an empty
    // string, which effectively suppresses that remote prefix.
    fn init_source_map<S>(&mut self, source_map: impl IntoIterator<Item = (S, Option<S>)>)
    where
        S: AsRef<str>,
    {
        // Quote backslashes and double quotes for LLDB's argument parser.
        fn escape(s: &str) -> String {
            s.replace("\\", "\\\\").replace("\"", "\\\"")
        }
        let mut args = String::new();
        for (remote, local) in source_map.into_iter() {
            let remote_escaped = escape(remote.as_ref());
            let local_escaped = match local {
                None => String::new(),
                Some(s) => escape(s.as_ref()),
            };
            // Each pair becomes: "remote" "local"
            args.push_str("\"");
            args.push_str(&remote_escaped);
            args.push_str("\" \"");
            args.push_str(&local_escaped);
            args.push_str("\" ");
        }
        if !args.is_empty() {
            info!("Set target.source-map args: {}", args);
            if let Err(error) = self.debugger.set_variable("target.source-map", &args) {
                self.console_error(format!("Could not set source map: {}", error.error_string()))
            }
        }
    }
    // Handles DAP `configurationDone`: after breakpoints are set up, start
    // listening for target-level breakpoint/module events.
    fn handle_configuration_done(&mut self) -> Result<(), Error> {
        self.target.broadcaster().add_listener(
            &self.event_listener,
            SBTargetEvent::BroadcastBitBreakpointChanged | SBTargetEvent::BroadcastBitModulesLoaded,
        );
        Ok(())
    }
    // Handles DAP `threads`: lists debuggee threads as "index: tid=... \"name\"".
    fn handle_threads(&mut self) -> Result<ThreadsResponseBody, Error> {
        if !self.process.is_initialized() {
            // VSCode may send `threads` request after a failed launch.
            return Ok(ThreadsResponseBody {
                threads: vec![],
            });
        }
        let mut response = ThreadsResponseBody {
            threads: vec![],
        };
        for thread in self.process.threads() {
            let mut descr = format!("{}: tid={}", thread.index_id(), thread.thread_id());
            if let Some(name) = thread.name() {
                log_errors!(write!(descr, " \"{}\"", name));
            }
            response.threads.push(Thread {
                id: thread.thread_id() as i64,
                name: descr,
            });
        }
        Ok(response)
    }
fn handle_stack_trace(&mut self, args: StackTraceArguments) -> Result<StackTraceResponseBody, Error> {
let thread = match self.process.thread_by_id(args.thread_id as ThreadID) {
Some(thread) => thread,
None => {
error!("Received invalid thread id in stack trace request.");
bail!("Invalid thread id.");
}
};
let start_frame = args.start_frame.unwrap_or(0);
let levels = args.levels.unwrap_or(std::i64::MAX);
let mut stack_frames = vec![];
for i in start_frame..(start_frame + levels) {
let frame = thread.frame_at_index(i as u32);
if !frame.is_valid() {
break;
}
let key = format!("[{},{}]", thread.index_id(), i);
let handle = self.var_refs.create(None, &key, Container::StackFrame(frame.clone()));
let mut stack_frame: StackFrame = Default::default();
stack_frame.id = handle.get() as i64;
let pc_address = frame.pc_address();
stack_frame.name = if let Some(name) = frame.function_name() {
name.to_owned()
} else {
format!("{:X}", pc_address.file_address())
};
if !self.in_disassembly(&frame) {
if let Some(le) = frame.line_entry() {
let fs = le.file_spec();
if let Some(local_path) = self.map_filespec_to_local(&fs) {
stack_frame.line = le.line() as i64;
stack_frame.column = le.column() as i64;
stack_frame.source = Some(Source {
name: Some(local_path.file_name().unwrap().to_string_lossy().into_owned()),
path: Some(local_path.to_string_lossy().into_owned()),
..Default::default()
});
}
}
} else {
let pc_addr = frame.pc();
let dasm = self.disassembly.from_address(pc_addr)?;
stack_frame.line = dasm.line_num_by_address(pc_addr) as i64;
stack_frame.column = 0;
stack_frame.source = Some(Source {
name: Some(dasm.source_name().to_owned()),
source_reference: Some(handles::to_i64(Some(dasm.handle()))),
..Default::default()
});
stack_frame.presentation_hint = Some("subtle".to_owned());
}
stack_frames.push(stack_frame);
}
Ok(StackTraceResponseBody {
stack_frames: stack_frames,
total_frames: Some(thread.num_frames() as i64),
})
}
    // Decides whether the given frame should be shown as disassembly:
    // always/never per user setting, or (in Auto mode) only when the frame's
    // source file cannot be mapped to an existing local file.
    fn in_disassembly(&mut self, frame: &SBFrame) -> bool {
        match self.show_disassembly {
            ShowDisassembly::Always => true,
            ShowDisassembly::Never => false,
            ShowDisassembly::Auto => {
                if let Some(le) = frame.line_entry() {
                    self.map_filespec_to_local(&le.file_spec()).is_none()
                } else {
                    // No line info at all -> disassembly.
                    true
                }
            }
        }
    }
    // Handles DAP `pause`: stops the process. If the stop request fails but
    // the process is already stopped, a 'stopped' event was probably lost,
    // so re-notify the client instead of erroring.
    fn handle_pause(&mut self, _args: PauseArguments) -> Result<(), Error> {
        match self.process.stop() {
            Ok(()) => Ok(()),
            Err(error) => {
                let state = self.process.state();
                if !state.is_running() {
                    // Did we lose a 'stopped' event?
                    self.notify_process_stopped();
                    Ok(())
                } else {
                    bail!(as_user_error(error));
                }
            }
        }
    }
    // Handles DAP `continue`: resumes the process. If resuming fails but the
    // process is already running, a 'running' event was probably lost, so
    // re-notify the client and report success.
    fn handle_continue(&mut self, _args: ContinueArguments) -> Result<ContinueResponseBody, Error> {
        self.before_resume();
        match self.process.resume() {
            Ok(()) => Ok(ContinueResponseBody {
                all_threads_continued: Some(true),
            }),
            Err(err) => {
                if self.process.state().is_running() {
                    // Did we lose a 'running' event?
                    self.notify_process_running();
                    Ok(ContinueResponseBody {
                        all_threads_continued: Some(true),
                    })
                } else {
                    bail!(as_user_error(err))
                }
            }
        }
    }
    // Handles DAP `next` (step over): source-level step-over in source mode,
    // instruction-level step-over in disassembly mode.
    fn handle_next(&mut self, args: NextArguments) -> Result<(), Error> {
        let thread = match self.process.thread_by_id(args.thread_id as ThreadID) {
            Some(thread) => thread,
            None => {
                error!("Received invalid thread id in step request.");
                bail!("Invalid thread id.");
            }
        };
        self.before_resume();
        let frame = thread.frame_at_index(0);
        if !self.in_disassembly(&frame) {
            thread.step_over(RunMode::OnlyDuringStepping);
        } else {
            // `true` = step over calls at the instruction level.
            thread.step_instruction(true);
        }
        Ok(())
    }
    // Handles DAP `stepIn`: source-level step-into in source mode,
    // instruction-level step-into in disassembly mode.
    fn handle_step_in(&mut self, args: StepInArguments) -> Result<(), Error> {
        let thread = match self.process.thread_by_id(args.thread_id as ThreadID) {
            Some(thread) => thread,
            None => {
                error!("Received invalid thread id in step-in request.");
                bail!("Invalid thread id.")
            }
        };
        self.before_resume();
        let frame = thread.frame_at_index(0);
        if !self.in_disassembly(&frame) {
            thread.step_into(RunMode::OnlyDuringStepping);
        } else {
            // `false` = step into calls at the instruction level.
            thread.step_instruction(false);
        }
        Ok(())
    }
    // Handles DAP `stepOut`: runs until the selected thread's current frame returns.
    fn handle_step_out(&mut self, args: StepOutArguments) -> Result<(), Error> {
        self.before_resume();
        let thread = self.process.thread_by_id(args.thread_id as ThreadID).ok_or("thread_id")?;
        thread.step_out();
        Ok(())
    }
    // Handles DAP `stepBack` by driving the gdb-remote reverse-execution
    // packets directly (requires a reverse-debugging capable gdbserver).
    fn handle_step_back(&mut self, args: StepBackArguments) -> Result<(), Error> {
        self.before_resume();
        self.show_disassembly = ShowDisassembly::Always; // Reverse line-step is not supported, so we switch to disassembly mode.
        self.reverse_exec(&[
            &format!("process plugin packet send Hc{:x}", args.thread_id), // select thread
            "process plugin packet send bs", // reverse-step
            "process plugin packet send bs", // reverse-step so we can forward step
            "stepi", // forward-step to refresh LLDB's cached debuggee state
        ])
    }
    // Handles DAP `reverseContinue` via raw gdb-remote reverse-execution packets,
    // mirroring the sequence used by `handle_step_back`.
    fn handle_reverse_continue(&mut self, args: ReverseContinueArguments) -> Result<(), Error> {
        self.before_resume();
        self.reverse_exec(&[
            &format!("process plugin packet send Hc{:x}", args.thread_id), // select thread
            "process plugin packet send bc", // reverse-continue
            "process plugin packet send bs", // reverse-step so we can forward step
            "stepi", // forward-step to refresh LLDB's cached debuggee state
        ])
    }
    // Runs a sequence of LLDB commands (used for reverse execution), aborting
    // on the first failure and surfacing the error in the debug console.
    fn reverse_exec(&mut self, commands: &[&str]) -> Result<(), Error> {
        let interp = self.debugger.command_interpreter();
        let mut result = SBCommandReturnObject::new();
        for command in commands {
            interp.handle_command(&command, &mut result, false);
            if !result.succeeded() {
                let error = into_string_lossy(result.error());
                self.console_error(error.clone());
                bail!(error);
            }
        }
        Ok(())
    }
fn handle_source(&mut self, args: SourceArguments) -> Result<SourceResponseBody, Error> {
let handle = handles::from_i64(args.source_reference)?;
let dasm = self.disassembly.find_by_handle(handle).unwrap();
Ok(SourceResponseBody {
content: dasm.get_source_text(),
mime_type: Some("text/x-lldb.disassembly".to_owned()),
})
}
    // Handles DAP `completions` for the debug console. In Evaluate mode, only
    // inputs prefixed with '`' or "/cmd " are treated as LLDB commands;
    // expression completion is not implemented yet.
    fn handle_completions(&mut self, args: CompletionsArguments) -> Result<CompletionsResponseBody, Error> {
        if !self.command_completions {
            bail!("Completions are disabled");
        }
        // Strip the command prefix and convert the client's 1-based column
        // into a 0-based offset within the remaining text.
        let (text, cursor_column) = match self.console_mode {
            ConsoleMode::Commands => (&args.text[..], args.column - 1),
            ConsoleMode::Evaluate => {
                if args.text.starts_with('`') {
                    (&args.text[1..], args.column - 2)
                } else if args.text.starts_with("/cmd ") {
                    (&args.text[5..], args.column - 6)
                } else {
                    // TODO: expression completions
                    return Ok(CompletionsResponseBody {
                        targets: vec![],
                    });
                }
            }
        };
        // Work around LLDB crash when text starts with non-alphabetic character.
        if let Some(c) = text.chars().next() {
            if !c.is_alphabetic() {
                return Ok(CompletionsResponseBody {
                    targets: vec![],
                });
            }
        }
        // Compute cursor position inside text in as byte offset.
        let cursor_index = text.char_indices().skip(cursor_column as usize).next().map(|p| p.0).unwrap_or(text.len());
        let interpreter = self.debugger.command_interpreter();
        let targets = match interpreter.handle_completions(text, cursor_index as u32, None) {
            None => vec![],
            Some((common_continuation, completions)) => {
                // LLDB completions usually include some prefix of the string being completed, without telling us what that prefix is.
                // For example, completing "set show tar" might return ["target.arg0", "target.auto-apply-fixits", ...].
                // Take a slice up to the cursor, split it on whitespaces, then get the last part.
                // This is the (likely) prefix of completions returned by LLDB.
                let prefix = &text[..cursor_index].split_whitespace().next_back().unwrap_or_default();
                let prefix_len = prefix.chars().count();
                let extended_prefix = format!("{}{}", prefix, common_continuation);
                let mut targets = vec![];
                for completion in completions {
                    // Check if we guessed prefix correctly
                    let item = if completion.starts_with(&extended_prefix) {
                        CompletionItem {
                            label: completion,
                            start: Some(args.column - prefix_len as i64),
                            length: Some(prefix_len as i64),
                            ..Default::default()
                        }
                    } else {
                        // Let VSCode apply its own heuristics to figure out the prefix.
                        CompletionItem {
                            label: completion,
                            ..Default::default()
                        }
                    };
                    targets.push(item);
                }
                targets
            }
        };
        Ok(CompletionsResponseBody {
            targets,
        })
    }
    // Handles DAP `gotoTargets`: always offers a single target for the
    // requested line, and stashes the request so that the follow-up `goto`
    // can resolve the source and line.
    fn handle_goto_targets(&mut self, args: GotoTargetsArguments) -> Result<GotoTargetsResponseBody, Error> {
        let targets = vec![GotoTarget {
            id: 1,
            label: format!("line {}", args.line),
            line: args.line,
            end_line: None,
            column: None,
            end_column: None,
            instruction_pointer_reference: None,
        }];
        self.last_goto_request = Some(args);
        Ok(GotoTargetsResponseBody {
            targets,
        })
    }
    // Handles DAP `goto` (jump to cursor): moves the program counter of the
    // selected thread to the location captured by the preceding `gotoTargets`
    // request — either a disassembly address or a source line.
    fn handle_goto(&mut self, args: GotoArguments) -> Result<(), Error> {
        match &self.last_goto_request {
            None => bail!("Unexpected goto message."),
            Some(ref goto_args) => {
                let thread_id = args.thread_id as u64;
                match self.process.thread_by_id(thread_id) {
                    None => bail!("Invalid thread id"),
                    Some(thread) => match goto_args.source.source_reference {
                        // Disassembly
                        Some(source_ref) => {
                            let handle = handles::from_i64(source_ref)?;
                            let dasm = self.disassembly.find_by_handle(handle).ok_or("source_ref")?;
                            let addr = dasm.address_by_line_num(goto_args.line as u32);
                            let frame = thread.frame_at_index(0).check().ok_or("frame 0")?;
                            if frame.set_pc(addr) {
                                // Force the client to re-fetch threads/stacks.
                                self.refresh_client_display(Some(thread_id));
                                Ok(())
                            } else {
                                bail!(as_user_error("Failed to set the instruction pointer."));
                            }
                        }
                        // Normal source file
                        None => {
                            let filespec = SBFileSpec::from(goto_args.source.path.as_ref().ok_or("source.path")?);
                            match thread.jump_to_line(&filespec, goto_args.line as u32) {
                                Ok(()) => {
                                    self.last_goto_request = None;
                                    self.refresh_client_display(Some(thread_id));
                                    Ok(())
                                }
                                Err(err) => {
                                    bail!(as_user_error(err))
                                }
                            }
                        }
                    },
                }
            }
        }
    }
    // Handles DAP `restartFrame`: unwinds back to the given frame and emits a
    // synthetic 'stopped' event so the client refreshes its UI.
    fn handle_restart_frame(&mut self, args: RestartFrameArguments) -> Result<(), Error> {
        let handle = handles::from_i64(args.frame_id)?;
        let frame = match self.var_refs.get(handle) {
            Some(Container::StackFrame(ref f)) => f.clone(),
            _ => bail!("Invalid frameId"),
        };
        let thread = frame.thread();
        thread.return_from_frame(&frame)?;
        self.send_event(EventBody::stopped(StoppedEventBody {
            thread_id: Some(thread.thread_id() as i64),
            all_threads_stopped: Some(true),
            reason: "restart".into(),
            ..Default::default()
        }));
        Ok(())
    }
    // Handles DAP `dataBreakpointInfo`: resolves a variable (within the given
    // variables container) to an address/size pair usable as a watchpoint id
    // ("addr/size"), or explains why a watchpoint cannot be set on it.
    fn handle_data_breakpoint_info(
        &mut self,
        args: DataBreakpointInfoArguments,
    ) -> Result<DataBreakpointInfoResponseBody, Error> {
        let container_handle = handles::from_i64(args.variables_reference.ok_or("variables_reference")?)?;
        let container = self.var_refs.get(container_handle).expect("Invalid variables reference");
        // Look the variable up in whatever scope the container represents.
        let child = match container {
            Container::SBValue(container) => container.child_member_with_name(&args.name),
            Container::Locals(frame) => frame.find_variable(&args.name),
            Container::Globals(frame) => frame.find_value(&args.name, ValueType::VariableGlobal),
            Container::Statics(frame) => frame.find_value(&args.name, ValueType::VariableStatic),
            _ => None,
        };
        if let Some(child) = child {
            let addr = child.load_address();
            if addr != lldb::INVALID_ADDRESS {
                let size = child.byte_size();
                if self.is_valid_watchpoint_size(size) {
                    // The data id round-trips through setDataBreakpoints.
                    let data_id = format!("{}/{}", addr, size);
                    let desc = child.name().unwrap_or("");
                    Ok(DataBreakpointInfoResponseBody {
                        data_id: Some(data_id),
                        access_types: Some(vec![
                            DataBreakpointAccessType::Read,
                            DataBreakpointAccessType::Write,
                            DataBreakpointAccessType::ReadWrite,
                        ]),
                        description: format!("{} bytes at {:X} ({})", size, addr, desc),
                        ..Default::default()
                    })
                } else {
                    Ok(DataBreakpointInfoResponseBody {
                        data_id: None,
                        description: "Invalid watchpoint size.".into(),
                        ..Default::default()
                    })
                }
            } else {
                Ok(DataBreakpointInfoResponseBody {
                    data_id: None,
                    description: "This variable doesn't have an address.".into(),
                    ..Default::default()
                })
            }
        } else {
            Ok(DataBreakpointInfoResponseBody {
                data_id: None,
                description: "Variable not found.".into(),
                ..Default::default()
            })
        }
    }
fn is_valid_watchpoint_size(&self, size: usize) -> bool {
let addr_size = self.target.address_byte_size();
match addr_size {
4 => match size {
1 | 2 | 4 => true,
_ => false,
},
8 => match size {
1 | 2 | 4 | 8 => true,
_ => false,
},
_ => true, // No harm in allowing to set an invalid watchpoint, other than user confusion.
}
}
    // Handles DAP `setDataBreakpoints`: replaces all watchpoints with the
    // requested set. Each data id is the "addr/size" string produced by
    // `handle_data_breakpoint_info`; a missing access type defaults to write.
    fn handle_set_data_breakpoints(
        &mut self,
        args: SetDataBreakpointsArguments,
    ) -> Result<SetDataBreakpointsResponseBody, Error> {
        // DAP semantics: the request carries the complete desired set.
        self.target.delete_all_watchpoints();
        let mut watchpoints = vec![];
        for wp in args.breakpoints {
            let mut parts = wp.data_id.split('/');
            let addr = parts.next().ok_or("")?.parse::<u64>()?;
            let size = parts.next().ok_or("")?.parse::<usize>()?;
            let (read, write) = match wp.access_type {
                None => (false, true),
                Some(DataBreakpointAccessType::Read) => (true, false),
                Some(DataBreakpointAccessType::Write) => (false, true),
                Some(DataBreakpointAccessType::ReadWrite) => (true, true),
            };
            let when = match (read, write) {
                (true, false) => "read",
                (false, true) => "write",
                (true, true) => "read and write",
                _ => unreachable!(),
            };
            // Report per-watchpoint success/failure back to the client.
            let res = match self.target.watch_address(addr, size, read, write) {
                Ok(_wp) => Breakpoint {
                    verified: true,
                    message: Some(format!("Break on {}", when)),
                    ..Default::default()
                },
                Err(err) => Breakpoint {
                    verified: false,
                    message: Some(err.to_string()),
                    ..Default::default()
                },
            };
            watchpoints.push(res);
        }
        Ok(SetDataBreakpointsResponseBody {
            breakpoints: watchpoints,
        })
    }
    // Handles DAP `disconnect`: runs `exitCommands`, releases the terminal,
    // then either kills or detaches from a still-alive debuggee depending on
    // the request's `terminateDebuggee` flag (falling back to the launch-time
    // default).
    fn handle_disconnect(&mut self, args: Option<DisconnectArguments>) -> Result<(), Error> {
        if let Some(commands) = &self.exit_commands {
            self.exec_commands("exitCommands", &commands)?;
        }
        // Let go of the terminal helper connection
        self.debuggee_terminal = None;
        if let Initialized(ref process) = self.process {
            let state = process.state();
            if state.is_alive() {
                let terminate = match args {
                    Some(args) => match args.terminate_debuggee {
                        Some(terminate) => terminate,
                        None => self.terminate_on_disconnect,
                    },
                    None => self.terminate_on_disconnect,
                };
                if terminate {
                    process.kill()?;
                } else {
                    process.detach()?;
                }
            }
        }
        Ok(())
    }
fn handle_read_memory(&mut self, args: ReadMemoryArguments) -> Result<ReadMemoryResponseBody, Error> {
let mem_ref = parse_int::parse::<i64>(&args.memory_reference)?;
let offset = args.offset.unwrap_or(0);
let count = args.count as usize;
let address = (mem_ref + offset) as lldb::Address;
let mut buffer = Vec::with_capacity(count);
buffer.resize(count, 0);
let bytes_read = self.process.read_memory(address, buffer.as_mut_slice())?;
buffer.truncate(bytes_read);
Ok(ReadMemoryResponseBody {
address: format!("0x{:X}", address),
unreadable_bytes: Some((count - bytes_read) as i64),
data: Some(base64::encode(buffer)),
})
}
    // Handles the custom `symbols` request: enumerates Code/Data symbols of
    // all target modules, in pages of ~1000, using a (module, symbol) index
    // pair as the continuation token.
    fn handle_symbols(&mut self, args: SymbolsRequest) -> Result<SymbolsResponse, Error> {
        // Resume from the previous page, or start at the beginning.
        let (mut next_module, mut next_symbol) = match args.continuation_token {
            Some(token) => (token.next_module, token.next_symbol),
            None => (0, 0),
        };
        let num_modules = self.target.num_modules();
        let mut symbols = vec![];
        while next_module < num_modules {
            let module = self.target.module_at_index(next_module);
            let num_symbols = module.num_symbols();
            while next_symbol < num_symbols {
                let symbol = module.symbol_at_index(next_symbol);
                let ty = symbol.type_();
                match ty {
                    SymbolType::Code | SymbolType::Data => {
                        let start_addr = symbol.start_address().load_address(&self.target);
                        symbols.push(Symbol {
                            name: symbol.display_name().into(),
                            type_: format!("{:?}", ty),
                            address: format!("0x{:X}", start_addr),
                        });
                    }
                    _ => {}
                }
                // Advance before the page-size check so the continuation
                // token points at the first symbol NOT yet returned.
                next_symbol += 1;
                if symbols.len() > 1000 {
                    return Ok(SymbolsResponse {
                        symbols,
                        continuation_token: Some(SymbolsContinuation {
                            next_module,
                            next_symbol,
                        }),
                    });
                }
            }
            next_symbol = 0;
            next_module += 1;
        }
        Ok(SymbolsResponse {
            symbols,
            continuation_token: None,
        })
    }
    // Handles the custom `adapterSettings` request: applies the new settings
    // and, if the debuggee is stopped, refreshes the client UI so changes
    // (e.g. display format) take effect immediately.
    fn handle_adapter_settings(&mut self, args: AdapterSettings) -> Result<(), Error> {
        self.update_adapter_settings_and_caps(&args);
        if self.process.state().is_stopped() {
            self.refresh_client_display(None);
        }
        Ok(())
    }
    // Applies adapter settings and, when any capability-affecting setting was
    // provided, re-announces adapter capabilities to the client.
    fn update_adapter_settings_and_caps(&mut self, settings: &AdapterSettings) {
        self.update_adapter_settings(&settings);
        if settings.evaluate_for_hovers.is_some()
            || settings.command_completions.is_some()
            || settings.source_languages.is_some()
        {
            self.send_event(EventBody::capabilities(CapabilitiesEventBody {
                capabilities: self.make_capabilities(),
            }));
        }
    }
    // Merges the provided settings into session state; fields that are `None`
    // keep their current values.
    fn update_adapter_settings(&mut self, settings: &AdapterSettings) {
        self.global_format = match settings.display_format {
            None => self.global_format,
            Some(DisplayFormat::Auto) => Format::Default,
            Some(DisplayFormat::Decimal) => Format::Decimal,
            Some(DisplayFormat::Hex) => Format::Hex,
            Some(DisplayFormat::Binary) => Format::Binary,
        };
        self.show_disassembly = settings.show_disassembly.unwrap_or(self.show_disassembly);
        self.deref_pointers = settings.dereference_pointers.unwrap_or(self.deref_pointers);
        self.suppress_missing_files = settings.suppress_missing_source_files.unwrap_or(self.suppress_missing_files);
        self.console_mode = settings.console_mode.unwrap_or(self.console_mode);
        self.evaluate_for_hovers = settings.evaluate_for_hovers.unwrap_or(self.evaluate_for_hovers);
        self.command_completions = settings.command_completions.unwrap_or(self.command_completions);
        // Timeout arrives in (fractional) seconds; stored as a Duration.
        if let Some(timeout) = settings.evaluation_timeout {
            self.evaluation_timeout = time::Duration::from_millis((timeout * 1000.0) as u64);
        }
        if let Some(ref terminal_prompt_clear) = settings.terminal_prompt_clear {
            self.terminal_prompt_clear = Some(terminal_prompt_clear.clone());
        }
        if let Some(ref source_languages) = settings.source_languages {
            self.source_languages = source_languages.clone();
        }
    }
    // Send fake stop event to force VSCode to refresh its UI state.
    // Uses the selected thread when `thread_id` is not provided.
    fn refresh_client_display(&mut self, thread_id: Option<ThreadID>) {
        let thread_id = match thread_id {
            Some(tid) => tid,
            None => self.process.selected_thread().thread_id(),
        };
        self.send_event(EventBody::stopped(StoppedEventBody {
            thread_id: Some(thread_id as i64),
            all_threads_stopped: Some(true),
            ..Default::default()
        }));
    }
    // Invalidates per-stop state (variable handles, selected-frame flag)
    // before letting the debuggee run again.
    fn before_resume(&mut self) {
        self.var_refs.reset();
        self.selected_frame_changed = false;
    }
    // Dispatches an LLDB event to the matching typed handler
    // (process / target / breakpoint / thread events).
    fn handle_debug_event(&mut self, event: SBEvent) {
        debug!("Debug event: {:?}", event);
        if let Some(process_event) = event.as_process_event() {
            self.handle_process_event(&process_event);
        } else if let Some(target_event) = event.as_target_event() {
            self.handle_target_event(&target_event);
        } else if let Some(bp_event) = event.as_breakpoint_event() {
            self.handle_breakpoint_event(&bp_event);
        } else if let Some(thread_event) = event.as_thread_event() {
            self.handle_thread_event(&thread_event);
        }
    }
    // Handles LLDB process events: forwards state changes (running / stopped /
    // exited / detached) to the client as DAP events, and pumps captured
    // stdout/stderr into 'output' events.
    fn handle_process_event(&mut self, process_event: &SBProcessEvent) {
        let flags = process_event.as_event().flags();
        if flags & SBProcessEvent::BroadcastBitStateChanged != 0 {
            match process_event.process_state() {
                ProcessState::Running | ProcessState::Stepping => self.notify_process_running(),
                ProcessState::Stopped => {
                    // "Restarted" stops are transient; don't report them.
                    if !process_event.restarted() {
                        self.notify_process_stopped()
                    }
                }
                ProcessState::Crashed | ProcessState::Suspended => self.notify_process_stopped(),
                ProcessState::Exited => {
                    let exit_code = self.process.exit_status() as i64;
                    self.console_message(format!("Process exited with code {}.", exit_code));
                    self.send_event(EventBody::exited(ExitedEventBody {
                        exit_code,
                    }));
                    self.send_event(EventBody::terminated(TerminatedEventBody {
                        restart: None,
                    }));
                }
                ProcessState::Detached => {
                    self.console_message("Detached from debuggee.");
                    self.send_event(EventBody::terminated(TerminatedEventBody {
                        restart: None,
                    }));
                }
                _ => (),
            }
        }
        if flags & (SBProcessEvent::BroadcastBitSTDOUT | SBProcessEvent::BroadcastBitSTDERR) != 0 {
            // Pick the stream indicated by the event and drain it in 1 KiB chunks.
            let read_stdout = |b: &mut [u8]| self.process.read_stdout(b);
            let read_stderr = |b: &mut [u8]| self.process.read_stderr(b);
            let (read_stream, category): (&dyn for<'r> Fn(&mut [u8]) -> usize, &str) =
                if flags & SBProcessEvent::BroadcastBitSTDOUT != 0 {
                    (&read_stdout, "stdout")
                } else {
                    (&read_stderr, "stderr")
                };
            let mut buffer = [0; 1024];
            let mut read = read_stream(&mut buffer);
            while read > 0 {
                self.send_event(EventBody::output(OutputEventBody {
                    category: Some(category.to_owned()),
                    output: String::from_utf8_lossy(&buffer[..read]).into_owned(),
                    ..Default::default()
                }));
                read = read_stream(&mut buffer);
            }
        }
    }
    // Emits a DAP 'continued' event covering all threads.
    fn notify_process_running(&mut self) {
        self.send_event(EventBody::continued(ContinuedEventBody {
            all_threads_continued: Some(true),
            thread_id: 0,
        }));
    }
    // Emits a DAP 'stopped' event: finds the thread that caused the stop
    // (preferring the selected thread), classifies the stop reason, and
    // flushes deferred loaded-module notifications to the Python helper.
    fn notify_process_stopped(&mut self) {
        // Find thread that has caused this stop
        let mut stopped_thread;
        // Check the currently selected thread first
        let selected_thread = self.process.selected_thread();
        stopped_thread = match selected_thread.stop_reason() {
            StopReason::Invalid | //.
            StopReason::None => None,
            _ => Some(selected_thread),
        };
        // Fall back to scanning all threads in the process
        if stopped_thread.is_none() {
            for thread in self.process.threads() {
                match thread.stop_reason() {
                    StopReason::Invalid | //.
                    StopReason::None => (),
                    _ => {
                        // Make the culprit the selected thread for later requests.
                        self.process.set_selected_thread(&thread);
                        stopped_thread = Some(thread);
                        break;
                    }
                }
            }
        }
        // Analyze stop reason
        let (stop_reason_str, description) = match stopped_thread {
            Some(ref stopped_thread) => {
                let stop_reason = stopped_thread.stop_reason();
                match stop_reason {
                    StopReason::Breakpoint => ("breakpoint", None),
                    StopReason::Trace | //.
                    StopReason::PlanComplete => ("step", None),
                    StopReason::Watchpoint => ("watchpoint", None),
                    StopReason::Signal => ("signal", Some(stopped_thread.stop_description())),
                    StopReason::Exception => ("exception", Some(stopped_thread.stop_description())),
                    _ => ("unknown", Some(stopped_thread.stop_description())),
                }
            }
            None => ("unknown", None),
        };
        if let Some(description) = &description {
            self.console_error(format!("Stop reason: {}", description));
        }
        self.send_event(EventBody::stopped(StoppedEventBody {
            all_threads_stopped: Some(true),
            thread_id: stopped_thread.map(|t| t.thread_id() as i64),
            reason: stop_reason_str.to_owned(),
            description: None,
            text: description,
            preserve_focus_hint: None,
            ..Default::default()
        }));
        // Module-load notifications are deferred until a stop (see
        // handle_target_event); deliver them now.
        if let Some(python) = &self.python {
            python.modules_loaded(&mut self.loaded_modules.iter());
        }
        self.loaded_modules.clear();
    }
    // Handles LLDB target events: translates module load/symbol-load/unload
    // notifications into DAP 'module' events ("new"/"changed"/"removed").
    fn handle_target_event(&mut self, event: &SBTargetEvent) {
        let flags = event.as_event().flags();
        if flags & SBTargetEvent::BroadcastBitModulesLoaded != 0 {
            for module in event.modules() {
                self.send_event(EventBody::module(ModuleEventBody {
                    reason: "new".to_owned(),
                    module: self.make_module_detail(&module),
                }));
                // Running scripts during target execution seems to trigger a bug in LLDB,
                // so we defer loaded module notification till next stop.
                self.loaded_modules.push(module);
            }
        } else if flags & SBTargetEvent::BroadcastBitSymbolsLoaded != 0 {
            for module in event.modules() {
                self.send_event(EventBody::module(ModuleEventBody {
                    reason: "changed".to_owned(),
                    module: self.make_module_detail(&module),
                }));
            }
        } else if flags & SBTargetEvent::BroadcastBitModulesUnloaded != 0 {
            for module in event.modules() {
                self.send_event(EventBody::module(ModuleEventBody {
                    reason: "removed".to_owned(),
                    module: Module {
                        id: serde_json::Value::String(self.module_id(&module)),
                        ..Default::default()
                    },
                }));
            }
        }
    }
    // Produces a stable identifier for a module: its load address in hex,
    // or (when the header address is unavailable, e.g. on Windows) its path.
    fn module_id(&self, module: &SBModule) -> String {
        let header_addr = module.object_header_address();
        if header_addr.is_valid() {
            format!("{:X}", header_addr.load_address(&self.target))
        } else {
            // header_addr not available on Windows, fall back to path
            module.filespec().path().display().to_string()
        }
    }
fn make_module_detail(&self, module: &SBModule) -> Module {
let mut msg = Module {
id: serde_json::Value::String(self.module_id(&module)),
name: module.filespec().filename().display().to_string(),
path: Some(module.filespec().path().display().to_string()),
..Default::default()
};
let header_addr = module.object_header_address();
if header_addr.is_valid() {
msg.address_range = Some(format!("{:X}", header_addr.load_address(&self.target)));
}
let symbols = module.symbol_filespec();
if symbols.is_valid() {
msg.symbol_status = Some("Symbols loaded.".into());
msg.symbol_file_path = Some(module.symbol_filespec().path().display().to_string());
} else {
msg.symbol_status = Some("Symbols not found".into())
}
msg
}
    // Handles LLDB thread events: records that the user changed the selected
    // frame (e.g. via the console) so the UI can be refreshed later.
    fn handle_thread_event(&mut self, event: &SBThreadEvent) {
        let flags = event.as_event().flags();
        if flags & SBThreadEvent::BroadcastBitSelectedFrameChanged != 0 {
            self.selected_frame_changed = true;
        }
    }
// Maps remote file path to local file path.
// The bulk of this work is done by LLDB itself (via target.source-map), in addition to which:
// - if `filespec` contains a relative path, we convert it to an absolute one using relative_path_base
// (which is normally initialized to ${workspaceFolder}) as a base.
// - we check whether the local file actually exists, and suppress it (if `suppress_missing_files` is true),
// to prevent VSCode from prompting to create them.
fn map_filespec_to_local(&self, filespec: &SBFileSpec) -> Option<Rc<PathBuf>> {
if !filespec.is_valid() {
return None;
} else {
let source_path = filespec.path();
let mut source_map_cache = self.source_map_cache.borrow_mut();
match source_map_cache.get(&source_path) {
Some(mapped_path) => mapped_path.clone(),
None => {
let mut path = filespec.path();
// Make sure the path is absolute.
if path.is_relative() {
path = self.relative_path_base.join(path);
}
path = normalize_path(path);
// VSCode sometimes fails to compare equal paths that differ in casing.
let mapped_path = match get_fs_path_case(&path) {
Ok(path) if path.is_file() => Some(Rc::new(path)),
_ => {
if self.suppress_missing_files {
None
} else {
Some(Rc::new(path))
}
}
};
// Cache the result, so we don't have to probe file system again for the same path.
source_map_cache.insert(source_path, mapped_path.clone());
mapped_path
}
}
}
}
    // Builds an execution context for expression evaluation: from the given
    // frame if provided, otherwise from the selected thread of a live process,
    // or from the selected target before launch.
    fn context_from_frame(&self, frame: Option<&SBFrame>) -> SBExecutionContext {
        match frame {
            Some(frame) => SBExecutionContext::from_frame(&frame),
            None => match self.process {
                Initialized(ref process) => {
                    let thread = process.selected_thread();
                    SBExecutionContext::from_thread(&thread)
                }
                NotInitialized => {
                    let target = self.debugger.selected_target();
                    SBExecutionContext::from_target(&target)
                }
            },
        }
    }
}
// Logs session teardown; all resources are released by member destructors.
impl Drop for DebugSession {
    fn drop(&mut self) {
        debug!("DebugSession::drop()");
    }
}
// Converts a C string to an owned `String`, replacing any invalid UTF-8
// sequences with U+FFFD (same semantics as `CStr::to_string_lossy`).
fn into_string_lossy(cstr: &CStr) -> String {
    String::from_utf8_lossy(cstr.to_bytes()).into_owned()
}
| 42.066249 | 136 | 0.525775 |
14394b8da2536794a31d6bcbe7affc3193f9a10d | 334 | use std::collections::HashMap;
fn main() {
    // Demo: build a team -> points table by zipping two parallel vectors.
    // NOTE(review): there are 5 team names but only 4 point values; `zip`
    // stops at the shorter iterator, so "QG" never makes it into the map —
    // confirm whether a fifth score is missing.
    let teams_name = vec![String::from("KK"), String::from("LQ"), String::from("IU"),String::from("PZ"),String::from("QG")];
    let teams_points = vec![10,20,30,50];
    // Collected as HashMap<&String, &i32>, borrowing from both vectors.
    let points_table: HashMap<_,_> = teams_name.iter().zip(teams_points.iter()).collect();
    println!("{:?}",points_table);
}
| 20.875 | 120 | 0.643713 |
ddfc8f2206557bb4a2ff5dabe56b50239098657b | 2,335 | use super::attributes::AttrSetting;
use crate::binwrite_endian::Endian;
use proc_macro2::TokenStream;
use std::num::NonZeroUsize;
// How a field's value should be written out.
pub enum Action {
    // Use the default write implementation.
    Default,
    // Use a user-supplied writer function (its path as a token stream).
    // NOTE(review): "CutomerWriter" looks like a typo for "CustomWriter",
    // but renaming would break the public API; left as-is.
    CutomerWriter(TokenStream),
}
// Code-generation options accumulated from parsed attribute settings;
// `None` means the option was not specified.
#[derive(Default)]
pub struct GenOptions {
    pub pad_before: Option<NonZeroUsize>,
    pub pad_after: Option<NonZeroUsize>,
    pub align_before: Option<NonZeroUsize>,
    pub align_after: Option<NonZeroUsize>,
    // Token streams of user-supplied pre/post-processing expressions.
    pub preprocessor: Option<TokenStream>,
    pub postprocessor: Option<TokenStream>,
}
// Per-field writer option overrides; currently only endianness.
#[derive(Default)]
pub struct OptionalWriterOption {
    pub endian: Option<Endian>
}
// Bundle of (write action, writer options, codegen options) for one field.
pub struct WriteInstructions(pub Action, pub OptionalWriterOption, pub GenOptions);
impl WriteInstructions {
    // Derives write instructions from the parsed attribute settings.
    // Returns `None` when the field is marked as ignored.
    pub fn try_from(settings: &[AttrSetting]) -> Option<WriteInstructions> {
        let mut action = Action::Default;
        let mut writer_option = OptionalWriterOption::default();
        let mut gen_options = GenOptions::default();
        for setting in settings {
            match setting {
                AttrSetting::Endian(endian) => writer_option.endian = Some(*endian),
                AttrSetting::With(writer_func) => {
                    action = Action::CutomerWriter(writer_func.clone());
                }
                AttrSetting::Preprocessor(pre) => {
                    gen_options.preprocessor = Some(pre.clone());
                }
                AttrSetting::Postprocessor(post) => {
                    gen_options.postprocessor = Some(post.clone());
                }
                AttrSetting::AlignBefore(n) => gen_options.align_before = NonZeroUsize::new(*n),
                AttrSetting::AlignAfter(n) => gen_options.align_after = NonZeroUsize::new(*n),
                AttrSetting::PadBefore(n) => gen_options.pad_before = NonZeroUsize::new(*n),
                AttrSetting::PadAfter(n) => gen_options.pad_after = NonZeroUsize::new(*n),
                // An ignored field yields no instructions at all.
                AttrSetting::Ignore => return None,
            }
        }
        Some(WriteInstructions(action, writer_option, gen_options))
    }
}
| 34.338235 | 83 | 0.570021 |
71a14bdbc466dc140aecd21a493a72e54edf96bf | 6,646 | use crate::markers::ResolutionSupport;
use crate::{conversion, ic, Address, Config, Error, FaultQueue, Lm75, OsMode, OsPolarity};
use core::marker::PhantomData;
use embedded_hal::blocking::i2c;
// I2C register addresses of the device.
struct Register;
impl Register {
    const TEMPERATURE: u8 = 0x00;
    const CONFIGURATION: u8 = 0x01;
    const T_HYST: u8 = 0x02;
    const T_OS: u8 = 0x03;
    // Sample-period register — presumably PCT2075-only; confirm against datasheet.
    const T_IDLE: u8 = 0x04;
}
// Bit masks within the configuration register.
struct BitFlags;
impl BitFlags {
    const SHUTDOWN: u8 = 0b0000_0001;
    const COMP_INT: u8 = 0b0000_0010;
    const OS_POLARITY: u8 = 0b0000_0100;
    // Two-bit fault-queue field (FQ0 is the low bit).
    const FAULT_QUEUE0: u8 = 0b0000_1000;
    const FAULT_QUEUE1: u8 = 0b0001_0000;
}
impl<I2C, E> Lm75<I2C, ic::Lm75>
where
    I2C: i2c::Write<Error = E>,
{
    /// Create new instance of the LM75 device.
    ///
    /// `address` is anything convertible into an I2C `Address`;
    /// the cached configuration starts from `Config::default()`.
    pub fn new<A: Into<Address>>(i2c: I2C, address: A) -> Self {
        let a = address.into();
        Lm75 {
            i2c,
            address: a.0,
            config: Config::default(),
            _ic: PhantomData,
        }
    }
}
impl<I2C, IC, E> Lm75<I2C, IC>
where
    I2C: i2c::Write<Error = E>,
    IC: ResolutionSupport<E>,
{
    /// Destroy driver instance, return I²C bus instance.
    pub fn destroy(self) -> I2C {
        self.i2c
    }
    /// Enable the sensor (default state).
    ///
    /// Clears the shutdown bit in the configuration register.
    pub fn enable(&mut self) -> Result<(), Error<E>> {
        let config = self.config;
        self.write_config(config.with_low(BitFlags::SHUTDOWN))
    }
    /// Disable the sensor (shutdown).
    ///
    /// Sets the shutdown bit in the configuration register.
    pub fn disable(&mut self) -> Result<(), Error<E>> {
        let config = self.config;
        self.write_config(config.with_high(BitFlags::SHUTDOWN))
    }
    /// Set the fault queue.
    ///
    /// Set the number of consecutive faults that will trigger an OS condition.
    /// The length is encoded in the two FAULT_QUEUE bits of the configuration
    /// register: 1 → 00, 2 → 01, 4 → 10, 6 → 11.
    pub fn set_fault_queue(&mut self, fq: FaultQueue) -> Result<(), Error<E>> {
        let config = self.config;
        match fq {
            FaultQueue::_1 => self.write_config(
                config
                    .with_low(BitFlags::FAULT_QUEUE1)
                    .with_low(BitFlags::FAULT_QUEUE0),
            ),
            FaultQueue::_2 => self.write_config(
                config
                    .with_low(BitFlags::FAULT_QUEUE1)
                    .with_high(BitFlags::FAULT_QUEUE0),
            ),
            FaultQueue::_4 => self.write_config(
                config
                    .with_high(BitFlags::FAULT_QUEUE1)
                    .with_low(BitFlags::FAULT_QUEUE0),
            ),
            FaultQueue::_6 => self.write_config(
                config
                    .with_high(BitFlags::FAULT_QUEUE1)
                    .with_high(BitFlags::FAULT_QUEUE0),
            ),
        }
    }
    /// Set the OS polarity.
    pub fn set_os_polarity(&mut self, polarity: OsPolarity) -> Result<(), Error<E>> {
        let config = self.config;
        match polarity {
            OsPolarity::ActiveLow => self.write_config(config.with_low(BitFlags::OS_POLARITY)),
            OsPolarity::ActiveHigh => self.write_config(config.with_high(BitFlags::OS_POLARITY)),
        }
    }
    /// Set the OS operation mode.
    pub fn set_os_mode(&mut self, mode: OsMode) -> Result<(), Error<E>> {
        let config = self.config;
        match mode {
            OsMode::Comparator => self.write_config(config.with_low(BitFlags::COMP_INT)),
            OsMode::Interrupt => self.write_config(config.with_high(BitFlags::COMP_INT)),
        }
    }
    /// Set the OS temperature (celsius).
    ///
    /// Returns `Error::InvalidInputData` for values outside `[-55.0, 125.0]`.
    #[allow(clippy::manual_range_contains)]
    pub fn set_os_temperature(&mut self, temperature: f32) -> Result<(), Error<E>> {
        if temperature < -55.0 || temperature > 125.0 {
            return Err(Error::InvalidInputData);
        }
        // Encode with the chip-specific resolution, then write both bytes.
        let (msb, lsb) =
            conversion::convert_temp_to_register(temperature, IC::get_resolution_mask());
        self.i2c
            .write(self.address, &[Register::T_OS, msb, lsb])
            .map_err(Error::I2C)
    }
    /// Set the hysteresis temperature (celsius).
    ///
    /// Returns `Error::InvalidInputData` for values outside `[-55.0, 125.0]`.
    #[allow(clippy::manual_range_contains)]
    pub fn set_hysteresis_temperature(&mut self, temperature: f32) -> Result<(), Error<E>> {
        if temperature < -55.0 || temperature > 125.0 {
            return Err(Error::InvalidInputData);
        }
        let (msb, lsb) =
            conversion::convert_temp_to_register(temperature, IC::get_resolution_mask());
        self.i2c
            .write(self.address, &[Register::T_HYST, msb, lsb])
            .map_err(Error::I2C)
    }
    /// write configuration to device
    ///
    /// The cached `self.config` is only updated after the bus write succeeds,
    /// so the cache always mirrors what the device last acknowledged.
    fn write_config(&mut self, config: Config) -> Result<(), Error<E>> {
        self.i2c
            .write(self.address, &[Register::CONFIGURATION, config.bits])
            .map_err(Error::I2C)?;
        self.config = config;
        Ok(())
    }
}
impl<I2C, E> Lm75<I2C, ic::Pct2075>
where
    I2C: i2c::Write<Error = E> + i2c::WriteRead<Error = E>,
{
    /// Create new instance of the PCT2075 device.
    ///
    /// Starts with the default configuration; no bus traffic happens until
    /// the first method call.
    pub fn new_pct2075<A: Into<Address>>(i2c: I2C, address: A) -> Self {
        let a = address.into();
        Lm75 {
            i2c,
            address: a.0,
            config: Config::default(),
            _ic: PhantomData,
        }
    }
    /// Set the sensor sample rate period in milliseconds (100ms increments).
    ///
    /// For values outside of the range `[100 - 3100]` or those not a multiple
    /// of 100, `Error::InvalidInputData` will be returned.
    pub fn set_sample_rate(&mut self, period: u16) -> Result<(), Error<E>> {
        // NOTE(review): `period == 0` passes this validation even though the
        // documented range starts at 100 — confirm whether 0 is a valid
        // T_IDLE register value or should be rejected.
        if period > 3100 || period % 100 != 0 {
            return Err(Error::InvalidInputData);
        }
        let byte = conversion::convert_sample_rate_to_register(period);
        self.i2c
            .write(self.address, &[Register::T_IDLE, byte])
            .map_err(Error::I2C)
    }
    /// Read the sample rate period from the sensor (ms).
    pub fn read_sample_rate(&mut self) -> Result<u16, Error<E>> {
        let mut data = [0; 1];
        self.i2c
            .write_read(self.address, &[Register::T_IDLE], &mut data)
            .map_err(Error::I2C)?;
        Ok(conversion::convert_sample_rate_from_register(data[0]))
    }
}
impl<I2C, IC, E> Lm75<I2C, IC>
where
    I2C: i2c::WriteRead<Error = E>,
    IC: ResolutionSupport<E>,
{
    /// Read the temperature from the sensor (celsius).
    ///
    /// Performs a two-byte read of the temperature register and converts the
    /// raw MSB/LSB pair using the chip-specific resolution mask.
    pub fn read_temperature(&mut self) -> Result<f32, Error<E>> {
        let mut data = [0; 2];
        self.i2c
            .write_read(self.address, &[Register::TEMPERATURE], &mut data)
            .map_err(Error::I2C)?;
        Ok(conversion::convert_temp_from_register(
            data[0],
            data[1],
            IC::get_resolution_mask(),
        ))
    }
}
| 32.738916 | 97 | 0.575083 |
d7a23f36ad102ee1f7b3f5f62c4ad22c8b13814d | 2,908 | //! File declarations
use crate::errors::Result;
use crate::{Diagnostics, Flavor, RpDecl, Span, Translate, Translator, Version};
use linked_hash_map::LinkedHashMap;
use serde::Serialize;
use std::collections::VecDeque;
/// Information about an enabled feature.
#[derive(Debug, Clone, Serialize)]
pub struct RpEnabledFeature {
    /// Source span at which the feature was enabled.
    pub span: Span,
}
/// A parsed file: its comment, schema version, enabled features and the
/// declarations it contains.
#[derive(Debug, Clone, Serialize)]
#[serde(
    bound = "F: Serialize, F::Field: Serialize, F::Endpoint: Serialize, F::Package: Serialize, \
             F::Name: Serialize, F::EnumType: Serialize"
)]
pub struct RpFile<F>
where
    F: Flavor,
{
    /// File-level comments.
    pub comment: Vec<String>,
    /// The schema version in use.
    pub version: Version,
    /// Features enabled and where they are enabled.
    pub features: LinkedHashMap<&'static str, RpEnabledFeature>,
    /// All nested declarations.
    pub decls: Vec<RpDecl<F>>,
    /// Maps a declaration's local ident to its index in `decls`
    /// (used by `decl_by_path`).
    pub decl_idents: LinkedHashMap<String, usize>,
}
/// Iterator over all declarations in a file.
///
/// Traverses breadth-first: each yielded declaration's children are pushed
/// to the back of the queue.
pub struct ForEachDecl<'a, F>
where
    F: Flavor,
{
    // Declarations not yet yielded.
    queue: VecDeque<&'a RpDecl<F>>,
}
impl<'a, F> Iterator for ForEachDecl<'a, F>
where
    F: Flavor,
{
    type Item = &'a RpDecl<F>;
    /// Pop the next declaration and queue its children for later visits.
    fn next(&mut self) -> Option<Self::Item> {
        let decl = self.queue.pop_front()?;
        self.queue.extend(decl.decls());
        Some(decl)
    }
}
impl<F> RpFile<F>
where
    F: Flavor,
{
    /// Iterate over all declarations in file (breadth-first).
    pub fn for_each_decl(&self) -> ForEachDecl<F> {
        let mut queue = VecDeque::new();
        queue.extend(self.decls.iter());
        ForEachDecl { queue }
    }
    /// Lookup a single declaration from its path.
    ///
    /// The first segment is resolved against the file's top-level
    /// declarations (via `decl_idents`); each subsequent segment is resolved
    /// against the nested declarations of the previous match. Returns `None`
    /// if the path is empty or any segment fails to resolve.
    pub fn decl_by_path<'a, 's>(
        &'a self,
        mut path: impl Iterator<Item = &'s str>,
    ) -> Option<&'a RpDecl<F>> {
        // `?` replaces the original manual `match`/`return None` chains;
        // behavior is identical, including the empty-path case.
        let first = path.next()?;
        let mut decl = self.decls.get(*self.decl_idents.get(first)?);
        for step in path {
            decl = decl?.decl_by_ident(step);
        }
        decl
    }
}
impl<T> Translate<T> for RpFile<T::Source>
where
    T: Translator,
{
    type Out = RpFile<T::Target>;
    /// Translate into different flavor.
    ///
    /// Only `decls` needs translating; comments, version, features and the
    /// ident index are flavor-independent and carried over unchanged.
    fn translate(self, diag: &mut Diagnostics, translator: &T) -> Result<RpFile<T::Target>> {
        Ok(RpFile {
            comment: self.comment,
            version: self.version,
            features: self.features,
            decls: self.decls.translate(diag, translator)?,
            decl_idents: self.decl_idents,
        })
    }
}
| 25.068966 | 96 | 0.581499 |
395526b6e1cfa7ca435ba38f2d390b78bc1fdbfa | 1,962 | mod indexa_config;
mod routes;
use actix_web::{middleware, web, App, HttpServer};
use indexa::database::Database;
use std::path::PathBuf;
use structopt::{clap::AppSettings, StructOpt};
// Command-line options, parsed by structopt. NOTE: the `///` comments on the
// fields below double as the runtime --help text, so they are left untouched.
#[derive(Debug, StructOpt)]
#[structopt(
    author = env!("CARGO_PKG_AUTHORS"),
    rename_all = "kebab-case",
    setting(AppSettings::ColoredHelp),
    setting(AppSettings::DeriveDisplayOrder),
    setting(AppSettings::AllArgsOverrideSelf)
)]
pub struct Opt {
    /// Address to listen on
    #[structopt(short, long, default_value = "127.0.0.1:8080")]
    addr: String,
    /// Number of threads to use.
    ///
    /// Defaults to the number of available CPUs minus 1.
    #[structopt(short, long)]
    threads: Option<usize>,
    /// Location of a config file.
    #[structopt(short = "C", long)]
    config: Option<PathBuf>,
}
// Entry point: parse CLI flags, load the indexa database into memory, size
// the global rayon pool, then serve the configured routes over HTTP.
#[actix_web::main]
async fn main() -> std::io::Result<()> {
    // Log at `info` unless RUST_LOG overrides it.
    env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
    let opt = Opt::from_args();
    let mut indexa_config = indexa_config::read_config(opt.config.as_ref()).unwrap();
    // Fold command-line flags into the configuration loaded from disk
    // (precedence is handled by `merge_opt`).
    indexa_config.flags.merge_opt(&opt);
    let db_location = indexa_config
        .database
        .location
        .expect("Could not determine the location of database file. Please edit the config file.");
    // The whole database is deserialized up front and shared read-only across
    // actix workers via `web::Data`.
    let database: Database =
        bincode::deserialize(&std::fs::read(db_location)?).expect("Failed to load database");
    let database = web::Data::new(database);
    rayon::ThreadPoolBuilder::new()
        .num_threads(indexa_config.flags.threads)
        .build_global()
        .expect("Could not build thread pool")
        ;
    eprintln!("Listening on http://{}", opt.addr);
    HttpServer::new(move || {
        App::new()
            .wrap(middleware::Logger::default())
            .wrap(middleware::Compress::default())
            .app_data(database.clone())
            .configure(routes::configure)
    })
    .bind(opt.addr)?
    .run()
    .await
}
| 28.852941 | 99 | 0.642202 |
166a4088b2d9c496cf755aba7eb9895ae211d47d | 579 | use std::env;
use dotenv::dotenv;
use giphy::v1::gifs::SearchRequest;
use giphy::v1::sync::*;
/// Example: run a blocking search against the Giphy v1 API and print the raw
/// response.
///
/// Requires the `GIPHY_API_KEY_TEST` environment variable (a `.env` file is
/// honoured via dotenv); panics with a descriptive message otherwise.
pub fn main() {
    dotenv().ok();
    let api_key = env::var("GIPHY_API_KEY_TEST")
        .unwrap_or_else(|e| panic!("Error retrieving env variable: {:?}", e));
    let client = reqwest::blocking::Client::new();
    let api = SyncApi::new(api_key, client);
    // Search for "rage" gifs, capped at 10 results.
    let response = SearchRequest::new("rage")
        .with_limit(10)
        .send_to(&api)
        .unwrap_or_else(|e| panic!("Error while calling search endpoint: {:?}", e));
    println!("Response: {:?}", response);
}
| 27.571429 | 84 | 0.609672 |
d69aa88adc0f0e488e90dc77f068fc62be44ba0f | 12,966 | // SPDX-License-Identifier: Apache-2.0
// This file is part of Frontier.
//
// Copyright (c) 2020 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! EVM stack-based runner.
use crate::runner::Runner as RunnerT;
use crate::{
AccountCodes,
AccountStorages,
AddressMapping,
Config,
Error,
Event,
FeeCalculator,
OnChargeEVMTransaction,
Pallet,
PrecompileSet,
// BlockHashMapping
};
use evm::backend::Backend as BackendT;
use evm::executor::{StackExecutor, StackState as StackStateT, StackSubstateMetadata};
use evm::{ExitError, ExitReason, Transfer};
use fp_evm::{CallInfo, CreateInfo, ExecutionInfo, Log, Vicinity};
use frame_support::{
ensure,
traits::{Currency, ExistenceRequirement, Get},
};
use sha3::{Digest, Keccak256};
use sp_core::{H160, H256, U256};
use sp_runtime::traits::UniqueSaturatedInto;
use sp_std::{boxed::Box, collections::btree_set::BTreeSet, marker::PhantomData, mem, vec::Vec};
/// Stack-based EVM runner.
///
/// Holds no state of its own (only a type marker); all chain state is read
/// and written through `SubstrateStackState` during execution.
#[derive(Default)]
pub struct Runner<T: Config> {
    _marker: PhantomData<T>,
}
impl<T: Config> Runner<T> {
    /// Execute an EVM operation.
    ///
    /// Shared driver for `call`/`create`/`create2`: validates gas price,
    /// balance and (optionally) nonce, withdraws the worst-case fee up
    /// front, runs `f` against a fresh `StackExecutor`, then settles the
    /// actual fee and applies the resulting account deletions and logs.
    pub fn execute<'config, F, R>(
        source: H160,
        value: U256,
        gas_limit: u64,
        gas_price: Option<U256>,
        nonce: Option<U256>,
        config: &'config evm::Config,
        f: F,
    ) -> Result<ExecutionInfo<R>, Error<T>>
    where
        F: FnOnce(&mut StackExecutor<'config, SubstrateStackState<'_, 'config, T>>) -> (ExitReason, R),
    {
        // Gas price check is skipped when performing a gas estimation
        // (`gas_price == None` ⇒ price defaults to zero).
        let gas_price = match gas_price {
            Some(gas_price) => {
                ensure!(
                    gas_price >= T::FeeCalculator::min_gas_price(),
                    Error::<T>::GasPriceTooLow
                );
                gas_price
            }
            None => Default::default(),
        };
        let vicinity = Vicinity {
            gas_price,
            origin: source,
        };
        let metadata = StackSubstateMetadata::new(gas_limit, &config);
        let state = SubstrateStackState::new(&vicinity, metadata);
        let mut executor = StackExecutor::new_with_precompile(state, config, T::Precompiles::execute);
        // The caller must be able to cover the worst case: full gas at the
        // quoted price plus the transferred value. Both products are checked
        // for overflow.
        let total_fee = gas_price
            .checked_mul(U256::from(gas_limit))
            .ok_or(Error::<T>::FeeOverflow)?;
        let total_payment = value.checked_add(total_fee).ok_or(Error::<T>::PaymentOverflow)?;
        let source_account = Pallet::<T>::account_basic(&source);
        ensure!(source_account.balance >= total_payment, Error::<T>::BalanceLow);
        if let Some(nonce) = nonce {
            ensure!(source_account.nonce == nonce, Error::<T>::InvalidNonce);
        }
        // Deduct fee from the `source` account.
        let fee = T::OnChargeTransaction::withdraw_fee(&source, total_fee)?;
        // Execute the EVM call.
        let (reason, retv) = f(&mut executor);
        let used_gas = U256::from(executor.used_gas());
        let actual_fee = executor.fee(gas_price);
        log::debug!(
            target: "evm",
            "Execution {:?} [source: {:?}, value: {}, gas_limit: {}, actual_fee: {}]",
            reason,
            source,
            value,
            gas_limit,
            actual_fee
        );
        // Refund fees to the `source` account if deducted more before,
        T::OnChargeTransaction::correct_and_deposit_fee(&source, actual_fee, fee)?;
        let state = executor.into_state();
        // Remove every account the execution marked for deletion.
        for address in state.substate.deletes {
            log::debug!(
                target: "evm",
                "Deleting account at {:?}",
                address
            );
            Pallet::<T>::remove_account(&address)
        }
        // Surface the EVM logs as pallet events.
        for log in &state.substate.logs {
            log::trace!(
                target: "evm",
                "Inserting log for {:?}, topics ({}) {:?}, data ({}): {:?}]",
                log.address,
                log.topics.len(),
                log.topics,
                log.data.len(),
                log.data
            );
            Pallet::<T>::deposit_event(Event::<T>::Log(Log {
                address: log.address,
                topics: log.topics.clone(),
                data: log.data.clone(),
            }));
        }
        Ok(ExecutionInfo {
            value: retv,
            exit_reason: reason,
            used_gas,
            logs: state.substate.logs,
        })
    }
}
impl<T: Config> RunnerT<T> for Runner<T> {
    type Error = Error<T>;
    /// Execute a message call against `target` with the given `input` data.
    fn call(
        source: H160,
        target: H160,
        input: Vec<u8>,
        value: U256,
        gas_limit: u64,
        gas_price: Option<U256>,
        nonce: Option<U256>,
        config: &evm::Config,
    ) -> Result<CallInfo, Self::Error> {
        Self::execute(source, value, gas_limit, gas_price, nonce, config, |executor| {
            executor.transact_call(source, target, value, input, gas_limit, vec![])
        })
    }
    /// Deploy a contract using the legacy `CREATE` address scheme; the
    /// derived contract address is returned alongside the exit reason.
    fn create(
        source: H160,
        init: Vec<u8>,
        value: U256,
        gas_limit: u64,
        gas_price: Option<U256>,
        nonce: Option<U256>,
        config: &evm::Config,
    ) -> Result<CreateInfo, Self::Error> {
        Self::execute(source, value, gas_limit, gas_price, nonce, config, |executor| {
            let address = executor.create_address(evm::CreateScheme::Legacy { caller: source });
            (
                executor.transact_create(source, value, init, gas_limit, vec![]),
                address,
            )
        })
    }
    /// Deploy a contract using the `CREATE2` scheme: the address is derived
    /// from the caller, the Keccak-256 hash of `init`, and `salt`.
    fn create2(
        source: H160,
        init: Vec<u8>,
        salt: H256,
        value: U256,
        gas_limit: u64,
        gas_price: Option<U256>,
        nonce: Option<U256>,
        config: &evm::Config,
    ) -> Result<CreateInfo, Self::Error> {
        let code_hash = H256::from_slice(Keccak256::digest(&init).as_slice());
        Self::execute(source, value, gas_limit, gas_price, nonce, config, |executor| {
            let address = executor.create_address(evm::CreateScheme::Create2 {
                caller: source,
                code_hash,
                salt,
            });
            (
                executor.transact_create2(source, value, init, salt, gas_limit, vec![]),
                address,
            )
        })
    }
}
/// One frame of the executor's substate stack; `self` is always the
/// innermost frame and `parent` links outward to the root.
struct SubstrateStackSubstate<'config> {
    // Gas/config bookkeeping managed by the `evm` crate.
    metadata: StackSubstateMetadata<'config>,
    // Accounts marked for deletion while this frame was live.
    deletes: BTreeSet<H160>,
    // Logs emitted while this frame was live.
    logs: Vec<Log>,
    // Enclosing frame; `None` at the root.
    parent: Option<Box<SubstrateStackSubstate<'config>>>,
}
impl<'config> SubstrateStackSubstate<'config> {
    /// Metadata of the innermost frame.
    pub fn metadata(&self) -> &StackSubstateMetadata<'config> {
        &self.metadata
    }
    /// Mutable metadata of the innermost frame.
    pub fn metadata_mut(&mut self) -> &mut StackSubstateMetadata<'config> {
        &mut self.metadata
    }
    /// Push a new nested frame and open a matching storage transaction.
    ///
    /// `mem::swap` moves the current frame into `parent`, so `self` keeps
    /// denoting the innermost frame without reallocating the stack.
    pub fn enter(&mut self, gas_limit: u64, is_static: bool) {
        let mut entering = Self {
            metadata: self.metadata.spit_child(gas_limit, is_static),
            parent: None,
            deletes: BTreeSet::new(),
            logs: Vec::new(),
        };
        mem::swap(&mut entering, self);
        self.parent = Some(Box::new(entering));
        sp_io::storage::start_transaction();
    }
    /// Pop the innermost frame: merge its metadata (`swallow_commit`), logs
    /// and pending deletions into the parent, and commit the storage
    /// transaction opened by the matching `enter`.
    pub fn exit_commit(&mut self) -> Result<(), ExitError> {
        let mut exited = *self.parent.take().expect("Cannot commit on root substate");
        mem::swap(&mut exited, self);
        self.metadata.swallow_commit(exited.metadata)?;
        self.logs.append(&mut exited.logs);
        self.deletes.append(&mut exited.deletes);
        sp_io::storage::commit_transaction();
        Ok(())
    }
    /// Pop the innermost frame, merging only via `swallow_revert` (logs and
    /// deletions are dropped), and roll its storage transaction back.
    pub fn exit_revert(&mut self) -> Result<(), ExitError> {
        let mut exited = *self.parent.take().expect("Cannot discard on root substate");
        mem::swap(&mut exited, self);
        self.metadata.swallow_revert(exited.metadata)?;
        sp_io::storage::rollback_transaction();
        Ok(())
    }
    /// Like `exit_revert`, but merging via the discard rules.
    pub fn exit_discard(&mut self) -> Result<(), ExitError> {
        let mut exited = *self.parent.take().expect("Cannot discard on root substate");
        mem::swap(&mut exited, self);
        self.metadata.swallow_discard(exited.metadata)?;
        sp_io::storage::rollback_transaction();
        Ok(())
    }
    /// Whether `address` was marked deleted in this frame or any ancestor.
    pub fn deleted(&self, address: H160) -> bool {
        if self.deletes.contains(&address) {
            return true;
        }
        if let Some(parent) = self.parent.as_ref() {
            return parent.deleted(address);
        }
        false
    }
    /// Mark `address` for deletion in the current frame.
    pub fn set_deleted(&mut self, address: H160) {
        self.deletes.insert(address);
    }
    /// Record an emitted EVM log in the current frame.
    pub fn log(&mut self, address: H160, topics: Vec<H256>, data: Vec<u8>) {
        self.logs.push(Log { address, topics, data });
    }
}
/// Substrate backend for EVM.
pub struct SubstrateStackState<'vicinity, 'config, T> {
    // Transaction-level environment: origin and gas price.
    vicinity: &'vicinity Vicinity,
    // Root of the substate stack (see `SubstrateStackSubstate`).
    substate: SubstrateStackSubstate<'config>,
    _marker: PhantomData<T>,
}
impl<'vicinity, 'config, T: Config> SubstrateStackState<'vicinity, 'config, T> {
    /// Create a new backend with given vicinity.
    ///
    /// Starts with a single (root) substate frame wrapping `metadata`.
    pub fn new(vicinity: &'vicinity Vicinity, metadata: StackSubstateMetadata<'config>) -> Self {
        Self {
            vicinity,
            substate: SubstrateStackSubstate {
                metadata,
                deletes: BTreeSet::new(),
                logs: Vec::new(),
                parent: None,
            },
            _marker: PhantomData,
        }
    }
}
/// Read-only chain environment exposed to the EVM, sourced from the pallets.
impl<'vicinity, 'config, T: Config> BackendT for SubstrateStackState<'vicinity, 'config, T> {
    fn gas_price(&self) -> U256 {
        self.vicinity.gas_price
    }
    fn origin(&self) -> H160 {
        self.vicinity.origin
    }
    // NOTE(review): both branches return the default hash — the real lookup
    // (`T::BlockHashMapping`) is commented out, so BLOCKHASH effectively
    // always yields zero here. Confirm this is intentional.
    fn block_hash(&self, number: U256) -> H256 {
        if number > U256::from(u32::max_value()) {
            H256::default()
        } else {
            // T::BlockHashMapping::block_hash(number.as_u32())
            H256::default()
        }
    }
    fn block_number(&self) -> U256 {
        let number: u128 = frame_system::Pallet::<T>::block_number().unique_saturated_into();
        U256::from(number)
    }
    fn block_coinbase(&self) -> H160 {
        Pallet::<T>::find_author()
    }
    fn block_timestamp(&self) -> U256 {
        let now: u128 = pallet_timestamp::Pallet::<T>::get().unique_saturated_into();
        // Divided by 1000 — presumably a milliseconds → seconds conversion;
        // confirm against the timestamp pallet's unit.
        U256::from(now / 1000)
    }
    fn block_difficulty(&self) -> U256 {
        U256::zero()
    }
    fn block_gas_limit(&self) -> U256 {
        T::BlockGasLimit::get()
    }
    fn chain_id(&self) -> U256 {
        U256::from(T::ChainId::get())
    }
    // Every account is considered to exist; emptiness is tracked separately
    // via `StackStateT::is_empty`.
    fn exists(&self, _address: H160) -> bool {
        true
    }
    fn basic(&self, address: H160) -> evm::backend::Basic {
        let account = Pallet::<T>::account_basic(&address);
        evm::backend::Basic {
            balance: account.balance,
            nonce: account.nonce,
        }
    }
    fn code(&self, address: H160) -> Vec<u8> {
        <AccountCodes<T>>::get(&address)
    }
    fn storage(&self, address: H160, index: H256) -> H256 {
        <AccountStorages<T>>::get(address, index)
    }
    // Pre-transaction ("original") storage is not tracked by this backend.
    fn original_storage(&self, _address: H160, _index: H256) -> Option<H256> {
        None
    }
}
impl<'vicinity, 'config, T: Config> StackStateT<'config> for SubstrateStackState<'vicinity, 'config, T> {
fn metadata(&self) -> &StackSubstateMetadata<'config> {
self.substate.metadata()
}
fn metadata_mut(&mut self) -> &mut StackSubstateMetadata<'config> {
self.substate.metadata_mut()
}
fn enter(&mut self, gas_limit: u64, is_static: bool) {
self.substate.enter(gas_limit, is_static)
}
fn exit_commit(&mut self) -> Result<(), ExitError> {
self.substate.exit_commit()
}
fn exit_revert(&mut self) -> Result<(), ExitError> {
self.substate.exit_revert()
}
fn exit_discard(&mut self) -> Result<(), ExitError> {
self.substate.exit_discard()
}
fn is_empty(&self, address: H160) -> bool {
Pallet::<T>::is_account_empty(&address)
}
fn deleted(&self, address: H160) -> bool {
self.substate.deleted(address)
}
fn is_cold(&self, address: H160) -> bool {
false
}
fn is_storage_cold(&self, address: H160, key: H256) -> bool {
false
}
fn inc_nonce(&mut self, address: H160) {
let account_id = T::AddressMapping::into_account_id(address);
frame_system::Pallet::<T>::inc_account_nonce(&account_id);
}
fn set_storage(&mut self, address: H160, index: H256, value: H256) {
if value == H256::default() {
log::debug!(
target: "evm",
"Removing storage for {:?} [index: {:?}]",
address,
index,
);
<AccountStorages<T>>::remove(address, index);
} else {
log::debug!(
target: "evm",
"Updating storage for {:?} [index: {:?}, value: {:?}]",
address,
index,
value,
);
<AccountStorages<T>>::insert(address, index, value);
}
}
fn reset_storage(&mut self, address: H160) {
<AccountStorages<T>>::remove_prefix(address, None);
}
fn log(&mut self, address: H160, topics: Vec<H256>, data: Vec<u8>) {
self.substate.log(address, topics, data)
}
fn set_deleted(&mut self, address: H160) {
self.substate.set_deleted(address)
}
fn set_code(&mut self, address: H160, code: Vec<u8>) {
log::debug!(
target: "evm",
"Inserting code ({} bytes) at {:?}",
code.len(),
address
);
Pallet::<T>::create_account(address, code);
}
fn transfer(&mut self, transfer: Transfer) -> Result<(), ExitError> {
let source = T::AddressMapping::into_account_id(transfer.source);
let target = T::AddressMapping::into_account_id(transfer.target);
T::Currency::transfer(
&source,
&target,
transfer.value.low_u128().unique_saturated_into(),
ExistenceRequirement::AllowDeath,
)
.map_err(|_| ExitError::OutOfFund)
}
fn reset_balance(&mut self, _address: H160) {
// Do nothing on reset balance in Substrate.
//
// This function exists in EVM because a design issue
// (arguably a bug) in SELFDESTRUCT that can cause total
// issurance to be reduced. We do not need to replicate this.
}
fn touch(&mut self, _address: H160) {
// Do nothing on touch in Substrate.
//
// EVM pallet considers all accounts to exist, and distinguish
// only empty and non-empty accounts. This avoids many of the
// subtle issues in EIP-161.
}
}
| 25.523622 | 105 | 0.668286 |
0aaad3fef104d55b150974aa051c6875c29d5117 | 16,101 | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
#![feature(allocator_api)]
use std::alloc::{Allocator, Layout};
use std::hash::BuildHasherDefault;
use std::ptr::NonNull;
use lz4::liblz4;
use nohash_hasher::NoHashHasher;
use once_cell::unsync::OnceCell;
use shmrs::chashmap::{CMap, CMapRef};
use shmrs::mapalloc::MapAlloc;
use ocamlrep::{ptr::UnsafeOcamlPtr, Value, STRING_TAG};
use ocamlrep_ocamlpool::catch_unwind;
// Keys are already hashes, so the map uses a pass-through (no-op) hasher.
type HashBuilder = BuildHasherDefault<NoHashHasher<u64>>;
// The concurrent shared-memory map: 64-bit hash -> stored heap value.
type HackMap = CMapRef<'static, u64, HeapValue, HashBuilder>;
thread_local! {
    // Per-thread handle to the shared map; set exactly once by
    // `shmffi_init` (master) or `shmffi_attach` (workers).
    static CMAP: OnceCell<HackMap> = OnceCell::new();
}
extern "C" {
    // OCaml runtime entry points for (un)marshaling values, plus raw C
    // allocation used for the temporary serialization buffers.
    fn caml_input_value_from_block(data: *const u8, size: usize) -> usize;
    fn caml_alloc_initialized_string(size: usize, data: *const u8) -> usize;
    fn caml_output_value_to_malloc(value: usize, flags: usize, ptr: *mut usize, len: *mut usize);
    // TODO(hverr): Switch to Rust buffer allocation.
    fn free(data: *const u8);
    fn malloc(size: libc::size_t) -> *mut u8;
}
/// Packed metadata word for a shared-memory heap value.
///
/// Bit layout (low to high):
///   bits 0..31  — buffer size in bytes
///   bits 31..62 — uncompressed size in bytes
///   bit 62      — whether the buffer holds a serialized (marshaled) value
#[derive(Clone, Copy)]
struct HeapValueHeader(u64);
impl HeapValueHeader {
    /// Width of each of the two size fields.
    const FIELD_BITS: u64 = 31;
    /// Mask selecting one size field.
    const FIELD_MASK: u64 = (1 << 31) - 1;
    /// Pack the three fields into a single word.
    ///
    /// Panics if either size does not fit in 31 bits.
    fn new(buffer_size: usize, uncompressed_size: usize, is_serialized: bool) -> Self {
        let buffer: u32 = buffer_size.try_into().unwrap();
        let uncompressed: u32 = uncompressed_size.try_into().unwrap();
        // Both sizes must leave their top bit clear: one spare bit is
        // reserved for a future field and one for OCaml's internals.
        assert_eq!(buffer & (1 << 31), 0);
        assert_eq!(uncompressed & (1 << 31), 0);
        let packed = (buffer as u64)
            | ((uncompressed as u64) << Self::FIELD_BITS)
            | ((is_serialized as u64) << 62);
        Self(packed)
    }
    /// Size of the buffer attached to this value.
    fn buffer_size(&self) -> usize {
        (self.0 & Self::FIELD_MASK) as usize
    }
    /// Size if the buffer were uncompressed.
    fn uncompressed_size(&self) -> usize {
        ((self.0 >> Self::FIELD_BITS) & Self::FIELD_MASK) as usize
    }
    /// Was the buffer serialized, or does it contain a raw OCaml string?
    fn is_serialized(&self) -> bool {
        (self.0 >> 62) & 1 == 1
    }
    /// Was the buffer compressed?
    fn is_compressed(&self) -> bool {
        self.uncompressed_size() != self.buffer_size()
    }
}
/// A value stored in shared-memory.
///
/// This is just a pointer to some buffer in shared-memory,
/// together with some metadata.
///
/// Note that it does not implement drop to deallocate the
/// underlying buffer. That would require tracking which
/// shard allocator was originally used to allocate the buffer,
/// as values can freely move between shards. The memory overhead
/// for this is prohibitively expensive.
struct HeapValue {
    // Packed sizes + serialization flag for the buffer below.
    header: HeapValueHeader,
    // Points at `header.buffer_size()` bytes in shared memory.
    data: NonNull<u8>,
}
impl HeapValue {
    /// Convert the heap value into an OCaml object.
    ///
    /// Safety: this allocates in the OCaml heap, and thus enters the runtime.
    /// It may deallocate each and every object you haven't registered as a
    /// root. It may even reallocate (i.e. move from the young generation to
    /// the old) values *inside* registered nodes). There's no guarantee that
    /// every object reachable from a root won't move!
    unsafe fn to_ocaml_value(&self) -> usize {
        if !self.header.is_serialized() {
            // Raw OCaml string: copy the bytes into a fresh OCaml string.
            caml_alloc_initialized_string(self.header.buffer_size(), self.data.as_ptr())
        } else if !self.header.is_compressed() {
            // Marshaled but uncompressed: hand the bytes straight to the
            // OCaml unmarshaler.
            caml_input_value_from_block(self.data.as_ptr(), self.header.buffer_size())
        } else {
            // Compressed: LZ4-decompress into a temporary C buffer, then
            // unmarshal from that buffer and free it.
            // TODO(hverr): Make thus more Rust-idiomatic
            let data = malloc(self.header.uncompressed_size());
            let uncompressed_size = liblz4::LZ4_decompress_safe(
                self.data.as_ptr() as *const i8,
                data as *mut i8,
                self.header.buffer_size().try_into().unwrap(),
                self.header.uncompressed_size().try_into().unwrap(),
            );
            assert!(self.header.uncompressed_size() == uncompressed_size as usize);
            let result = caml_input_value_from_block(data, uncompressed_size as libc::size_t);
            free(data);
            result
        }
    }
    /// Copy this value's buffer into `alloc` and return a new `HeapValue`
    /// pointing at the copy; the header is reused unchanged. Used when a
    /// value moves to a different shard (see `shmffi_move`).
    fn clone_in(&self, alloc: &MapAlloc<'static>) -> HeapValue {
        let layout = Layout::from_size_align(self.header.buffer_size(), 1).unwrap();
        let mut data = alloc.allocate(layout).unwrap();
        // Safety: we are the only ones with access to the allocated chunk.
        unsafe {
            data.as_mut().copy_from_slice(self.as_slice())
        };
        HeapValue {
            header: self.header,
            data: data.cast(),
        }
    }
    /// View the underlying buffer as a byte slice.
    fn as_slice(&self) -> &[u8] {
        let len = self.header.buffer_size();
        // Safety: We own the data. The return value cannot outlive `self`.
        unsafe { std::slice::from_raw_parts(self.data.as_ptr(), len) }
    }
}
/// A value in one of the three forms it can take before being copied into
/// shared memory.
enum SerializedValue<'a> {
    /// An OCaml string value; stored as raw bytes without marshaling.
    String(Value<'a>),
    /// Marshaled bytes in a C `malloc`ed buffer (see `caml_output_value_to_malloc`).
    Serialized {
        ptr: *mut u8,
        len: usize,
    },
    /// LZ4-compressed marshaled bytes; `uncompressed_size` is the marshaled
    /// length before compression.
    Compressed {
        ptr: *mut u8,
        len: usize,
        uncompressed_size: i32,
    },
}
impl<'a> SerializedValue<'a> {
    /// Serialize an OCaml value: strings are kept as-is, everything else is
    /// marshaled into a `malloc`ed buffer via the OCaml runtime.
    fn from(value: Value<'a>) -> Self {
        use SerializedValue::*;
        // We are entering the OCaml runtime, is there a risk
        // that `value` (or other values) get deallocated?
        // I don't think so: caml_output_value_to_malloc shouldn't
        // allocate on the OCaml heap, and thus not trigger the GC.
        if value
            .as_block()
            .map_or(false, |blck| blck.tag() == STRING_TAG)
        {
            String(value)
        } else {
            let mut ptr: usize = 0;
            let mut len: usize = 0;
            unsafe {
                caml_output_value_to_malloc(
                    value.to_bits(),
                    Value::int(0).to_bits(),
                    (&mut ptr) as *mut usize,
                    (&mut len) as *mut usize,
                )
            };
            Serialized {
                ptr: ptr as *mut u8,
                len,
            }
        }
    }
    /// View the payload bytes, whichever form they are in.
    fn as_slice(&self) -> &[u8] {
        use SerializedValue::*;
        match self {
            String(value) => value.as_byte_string().unwrap(),
            &Serialized { ptr, len } | &Compressed { ptr, len, .. } => unsafe {
                std::slice::from_raw_parts(ptr as *const u8, len)
            },
        }
    }
    /// LZ4-compress a `Serialized` payload, keeping the compressed form only
    /// if it is strictly smaller; `String` and already-`Compressed` values
    /// are passed through unchanged.
    fn maybe_compress(self) -> Self {
        use SerializedValue::*;
        match self {
            String(value) => String(value),
            Compressed {
                ptr,
                len,
                uncompressed_size,
            } => Compressed {
                ptr,
                len,
                uncompressed_size,
            },
            Serialized { ptr, len } => unsafe {
                let uncompressed_size: i32 = len.try_into().unwrap();
                let max_compression_size = liblz4::LZ4_compressBound(uncompressed_size);
                let compressed_data = malloc(max_compression_size as libc::size_t);
                let compressed_size = liblz4::LZ4_compress_default(
                    ptr as *const i8,
                    compressed_data as *mut i8,
                    uncompressed_size,
                    max_compression_size,
                );
                // 0 signals compression failure; >= means no size win.
                // Either way, drop the compressed buffer and keep the
                // original; otherwise free the original instead.
                if compressed_size == 0 || compressed_size >= uncompressed_size {
                    free(compressed_data);
                    Serialized { ptr, len }
                } else {
                    free(ptr);
                    Compressed {
                        ptr: compressed_data,
                        len: compressed_size as usize,
                        uncompressed_size,
                    }
                }
            },
        }
    }
    /// Release the temporary `malloc`ed buffer; `String` borrows from the
    /// OCaml heap and has nothing to free.
    fn free(self) {
        use SerializedValue::*;
        match self {
            String(..) => {}
            Serialized { ptr, .. } | Compressed { ptr, .. } => unsafe {
                free(ptr);
            },
        }
    }
    /// Copy the payload into shared memory via `alloc` and build the matching
    /// `HeapValue` header for it.
    fn to_heap_value_in(&self, alloc: &MapAlloc<'static>) -> HeapValue {
        let slice = self.as_slice();
        let layout = Layout::from_size_align(slice.len(), 1).unwrap();
        let mut data = alloc.allocate(layout).unwrap();
        // Safety: we are the only ones with access to the allocated chunk.
        unsafe {
            data.as_mut().copy_from_slice(slice)
        };
        use SerializedValue::*;
        let header = match self {
            String(..) => HeapValueHeader::new(slice.len(), slice.len(), false),
            Serialized { .. } => HeapValueHeader::new(slice.len(), slice.len(), true),
            Compressed {
                uncompressed_size, ..
            } => HeapValueHeader::new(slice.len(), *uncompressed_size as usize, true),
        };
        HeapValue {
            header,
            data: data.cast(),
        }
    }
    /// Payload size before compression (equals `compressed_size` unless
    /// the value is `Compressed`).
    pub fn uncompressed_size(&self) -> usize {
        use SerializedValue::*;
        match self {
            String(..) => self.as_slice().len(),
            Serialized { ptr: _, len } => *len,
            Compressed {
                ptr: _,
                uncompressed_size,
                len: _,
            } => *uncompressed_size as usize,
        }
    }
    /// Payload size as it will be stored.
    pub fn compressed_size(&self) -> usize {
        use SerializedValue::*;
        match self {
            String(..) => self.as_slice().len(),
            Serialized { ptr: _, len } => *len,
            Compressed {
                ptr: _,
                uncompressed_size: _,
                len,
            } => *len,
        }
    }
}
/// Run `f` against this thread's handle to the shared map.
///
/// Panics if neither `shmffi_init` nor `shmffi_attach` has been called on
/// the current thread.
fn with<R>(f: impl FnOnce(&HackMap) -> R) -> R {
    CMAP.with(|cell| {
        let cmap = cell.get().unwrap();
        f(cmap)
    })
}
/// Initialize the shared map inside the given mmap region (master process
/// only) and store this thread's handle in `CMAP`.
///
/// Panics (caught and converted by `catch_unwind`) if already initialized
/// on this thread; the trailing `0` is the dummy OCaml return value.
#[no_mangle]
pub extern "C" fn shmffi_init(mmap_address: *mut libc::c_void, file_size: libc::size_t) {
    catch_unwind(|| {
        CMAP.with(move |cell| {
            assert!(cell.get().is_none());
            cell.get_or_init(move ||
                // Safety:
                // - We are the only one initializing!
                unsafe {
                    CMap::initialize_with_hasher(
                        BuildHasherDefault::default(),
                        mmap_address,
                        file_size,
                    )
                });
        });
        0
    });
}
/// Attach to a shared map previously created by `shmffi_init` in the master
/// process, and store this thread's handle in `CMAP`.
#[no_mangle]
pub extern "C" fn shmffi_attach(mmap_address: *mut libc::c_void, file_size: libc::size_t) {
    catch_unwind(|| {
        CMAP.with(move |cell| {
            assert!(cell.get().is_none());
            cell.get_or_init(move ||
                // Safety:
                // - Should be already initialized by the master process.
                unsafe {
                    CMap::attach(mmap_address, file_size)
                });
        });
        0
    });
}
/// Serialize an OCaml value (compressing when it pays off) and insert it
/// into the shared map under `hash`.
///
/// Returns an OCaml `(compressed_size, uncompressed_size, total_size)`
/// tuple; the third component currently repeats the compressed size (see
/// the TODO below).
#[no_mangle]
pub extern "C" fn shmffi_add(hash: u64, data: usize) -> usize {
    catch_unwind(|| {
        // `data` is an OCaml value passed across the FFI boundary as raw bits.
        let data = unsafe { Value::from_bits(data) };
        let serialized = SerializedValue::from(data);
        let compressed = serialized.maybe_compress();
        let compressed_size = compressed.compressed_size();
        let uncompressed_size = compressed.uncompressed_size();
        with(|cmap| {
            cmap.write_map(&hash, |shard| {
                // Copy the payload into the shard's non-evictable allocator.
                let heap_value = compressed.to_heap_value_in(shard.alloc_non_evictable);
                shard.map.insert(hash, heap_value);
            })
        });
        // Release the temporary malloc'ed serialization buffer.
        compressed.free();
        // TODO(hverr): We don't have access to "total_size" (which includes
        // alignment overhead), remove the third field.
        let ret: (isize, isize, isize) = (
            compressed_size as isize,
            uncompressed_size as isize,
            compressed_size as isize,
        );
        unsafe { ocamlrep_ocamlpool::to_ocaml(&ret) }
    })
}
/// Look up `hash` and deserialize the stored payload back into an OCaml
/// value, returned wrapped in an OCaml `option` (`None` when absent).
#[no_mangle]
pub extern "C" fn shmffi_get_and_deserialize(hash: u64) -> usize {
    catch_unwind(|| {
        with(|cmap| {
            cmap.read_map(&hash, |map| {
                let result = match &map.get(&hash) {
                    None => None,
                    Some(heap_value) => {
                        // Safety: we are not holding on to unrooted OCaml values.
                        //
                        // This value itself is unrooted, but we are not calling into
                        // the OCalm runtime after this. The option that will be allocated
                        // later is allocated via ocamlpool, which cannot trigger the GC.
                        let deserialized_value = unsafe { heap_value.to_ocaml_value() };
                        // Safety: the value is only used to wrap it in an option.
                        //
                        // Because we use ocamlpool below, the GC won't run while this
                        // value exists.
                        let deserialized_value = unsafe { UnsafeOcamlPtr::new(deserialized_value) };
                        Some(deserialized_value)
                    }
                };
                // Safety: we don't call into the OCaml runtime, so there's no
                // risk of us GC'ing the deserialized value.
                unsafe { ocamlrep_ocamlpool::to_ocaml(&result) }
            })
        })
    })
}
/// Does the map contain `hash`? Returns an OCaml bool.
#[no_mangle]
pub extern "C" fn shmffi_mem(hash: u64) -> usize {
    catch_unwind(|| {
        let flag = with(|cmap| cmap.read_map(&hash, |map| map.contains_key(&hash)));
        Value::int(flag as isize).to_bits()
    })
}
/// Membership check with hh_shared-style status codes.
///
/// NOTE(review): unlike most exports here this is not wrapped in
/// `catch_unwind` — confirm a panic cannot unwind across the FFI boundary.
#[no_mangle]
pub extern "C" fn shmffi_mem_status(hash: u64) -> usize {
    let flag = with(|cmap| cmap.read_map(&hash, |map| map.contains_key(&hash)));
    // From hh_shared.c: 1 = present, -1 = not present
    let result = if flag { 1 } else { -1 };
    Value::int(result).to_bits()
}
/// Return the stored buffer size for `hash` as an OCaml int.
/// Panics if `hash` is absent (indexing); not wrapped in `catch_unwind`.
#[no_mangle]
pub extern "C" fn shmffi_get_size(hash: u64) -> usize {
    let size = with(|cmap| cmap.read_map(&hash, |map| map[&hash].header.buffer_size()));
    Value::int(size as isize).to_bits()
}
/// Move the value stored under `hash1` to `hash2`.
///
/// The payload is copied into the destination shard's allocator; the source
/// buffer itself is not deallocated (see the `HeapValue` doc comment).
/// Panics if `hash1` is absent.
#[no_mangle]
pub extern "C" fn shmffi_move(hash1: u64, hash2: u64) {
    with(|cmap| {
        let value = cmap.write_map(&hash1, |shard1| shard1.map.remove(&hash1).unwrap());
        cmap.write_map(&hash2, |shard2| {
            let cloned_value = value.clone_in(shard2.alloc_non_evictable);
            shard2.map.insert(hash2, cloned_value);
        });
    });
}
/// Remove the entry for `hash`, returning its buffer length as an OCaml int.
///
/// Panics if `hash` is absent. The shared-memory buffer itself is not
/// deallocated (see the `HeapValue` doc comment).
#[no_mangle]
pub extern "C" fn shmffi_remove(hash: u64) -> usize {
    let size = with(|cmap| {
        cmap.write_map(&hash, |shard| {
            let heap_value = shard.map.remove(&hash).unwrap();
            heap_value.as_slice().len()
        })
    });
    Value::int(size as isize).to_bits()
}
/// Total bytes allocated in the shared map, as an OCaml int.
#[no_mangle]
pub extern "C" fn shmffi_allocated_bytes() -> usize {
    catch_unwind(|| {
        let bytes = with(|cmap| cmap.allocated_bytes());
        Value::int(bytes as isize).to_bits()
    })
}
/// Number of entries currently stored in the shared map, as an OCaml int.
#[no_mangle]
pub extern "C" fn shmffi_num_entries() -> usize {
    catch_unwind(|| {
        let num_entries = with(|cmap| cmap.len());
        Value::int(num_entries as isize).to_bits()
    })
}
#[cfg(test)]
mod tests {
    use super::*;
    use rand::prelude::*;

    /// Round-trips randomized (buffer_size, uncompressed_size, is_serialized)
    /// triples through `HeapValueHeader` and checks every accessor,
    /// including the derived `is_compressed` flag.
    #[test]
    fn test_heap_value_header() {
        const NUM_TESTS: usize = 100;
        // Fixed seed so the test is deterministic across runs.
        let mut rng = StdRng::from_seed([0; 32]);
        for _ in 0..NUM_TESTS {
            // Sizes are masked to 31 bits — presumably the header packs each
            // size into a 31-bit field; TODO confirm against HeapValueHeader.
            let buffer_size = (rng.gen::<u32>() & ((1 << 31) - 1)) as usize;
            // Half the time the sizes match (uncompressed case).
            let uncompressed_size = if rng.gen_bool(0.5) {
                buffer_size
            } else {
                (rng.gen::<u32>() & ((1 << 31) - 1)) as usize
            };
            let is_serialized = rng.gen_bool(0.5);
            let header = HeapValueHeader::new(buffer_size, uncompressed_size, is_serialized);
            assert_eq!(header.buffer_size(), buffer_size);
            assert_eq!(header.uncompressed_size(), uncompressed_size);
            assert_eq!(header.is_serialized(), is_serialized);
            // Compression is implied by differing sizes.
            assert_eq!(header.is_compressed(), buffer_size != uncompressed_size);
        }
    }
}
| 32.92638 | 100 | 0.548537 |
e6bcb7989cf581e684dc8b01889dec1e618d2311 | 2,511 | use legion::prelude::Resources;
use crate::{
graphics::{
pipeline_manager::{PipelineDesc, PipelineManager},
renderer::DEPTH_FORMAT,
resources::GPUResourceManager,
},
AssetManager,
};
/// Builds the skybox render pipeline, registers its material bind group
/// layout ("skybox_material") with the GPU resource manager, and adds the
/// finished pipeline to the pipeline manager under the name "skybox".
pub fn create(resources: &Resources) {
    // Pull the managers and GPU handles out of the shared resource registry.
    let asset_manager = resources.get::<AssetManager>().unwrap();
    let mut pipeline_manager = resources.get_mut::<PipelineManager>().unwrap();
    let mut resource_manager = resources.get_mut::<GPUResourceManager>().unwrap();
    let device = resources.get::<wgpu::Device>().unwrap();
    let sc_desc = resources.get::<wgpu::SwapChainDescriptor>().unwrap();
    let mut skybox_desc = PipelineDesc::default();
    skybox_desc.shader = "skybox.shader".to_string();
    // Render into the swap chain's color format.
    skybox_desc.color_state.format = sc_desc.format;
    // Depth-test against already-drawn geometry but never write depth:
    // LessEqual lets the skybox pass exactly at the far plane while staying
    // behind everything else. Stencil is unused.
    skybox_desc.depth_state = Some(wgpu::DepthStencilStateDescriptor {
        format: DEPTH_FORMAT,
        depth_write_enabled: false,
        depth_compare: wgpu::CompareFunction::LessEqual,
        stencil_front: wgpu::StencilStateFaceDescriptor::IGNORE,
        stencil_back: wgpu::StencilStateFaceDescriptor::IGNORE,
        stencil_read_mask: 0,
        stencil_write_mask: 0,
    });
    // Material layout: binding 0 = cube texture, binding 1 = its sampler,
    // both only visible to the fragment stage.
    let skybox_material_layout =
        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            bindings: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStage::FRAGMENT,
                    ty: wgpu::BindingType::SampledTexture {
                        component_type: wgpu::TextureComponentType::Float,
                        multisampled: false,
                        dimension: wgpu::TextureViewDimension::Cube,
                    },
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStage::FRAGMENT,
                    ty: wgpu::BindingType::Sampler { comparison: false },
                },
            ],
            label: None,
        });
    resource_manager.add_bind_group_layout("skybox_material", skybox_material_layout);
    // Bind group 0 = per-frame globals, group 1 = the material above.
    skybox_desc.layouts = vec!["globals".to_string(), "skybox_material".to_string()];
    // No culling: the camera sits inside the skybox cube, so back faces are
    // the ones we actually see.
    skybox_desc.cull_mode = wgpu::CullMode::None;
    skybox_desc
        .vertex_state
        .set_index_format(wgpu::IndexFormat::Uint16);
    pipeline_manager.add_pipeline(
        "skybox",
        &skybox_desc,
        vec!["globals"],
        &device,
        &asset_manager,
        &resource_manager,
    );
}
| 36.926471 | 86 | 0.61131 |
f52de3989ad4d72a68dc459ee6c93d414aa49884 | 4,752 | use super::write_arguments::WriteArguments;
use super::*;
use crate::{
query_ast::*,
query_graph::{Node, NodeRef, QueryGraph, QueryGraphDependency},
ParsedInputValue,
};
use connector::{Filter, ScalarCompare};
use prisma_models::{ModelRef, RelationFieldRef};
use std::{convert::TryInto, sync::Arc};
/// Handles nested update (one) cases.
/// The graph is expanded with the `Check` and `Update` nodes.
///
/// (illustration simplified, `Parent` / `Read Result` exemplary)
///
/// ```text
/// ┌──────┐
/// ┌──│Parent│────────┐
/// │ └──────┘ │
/// │ │ │
/// │ ▼ ▼
/// │ ┌──────┐ ┌───────────┐
/// │ │Check │ │Read result│
/// │ └──────┘ └───────────┘
/// │ │
/// │ ▼
/// │ ┌──────┐
/// └─▶│Update│
/// └──────┘
/// ```
pub fn connect_nested_update(
    graph: &mut QueryGraph,
    parent: &NodeRef,
    parent_relation_field: &RelationFieldRef,
    value: ParsedInputValue,
    child_model: &ModelRef,
) -> QueryGraphBuilderResult<()> {
    // `value` may be a single object or a list of them; treat uniformly.
    for value in utils::coerce_vec(value) {
        let (data, finder) = if parent_relation_field.is_list {
            // We have to have a record specified as a record finder in "where".
            // This finder is used to read the children first, to make sure they're actually connected.
            // The update itself operates on the ID found by the read check.
            let mut map: ParsedInputMap = value.try_into()?;
            let where_arg = map.remove("where").unwrap();
            let record_finder = extract_record_finder(where_arg, &child_model)?;
            let data_value = map.remove("data").unwrap();
            (data_value, Some(record_finder))
        } else {
            // To-one relation: the whole input is the update data; the child
            // is located purely via the parent link (no "where" wrapper).
            (value, None)
        };
        // "Check" node: reads the child through the parent relation,
        // optionally narrowed by the record finder.
        let find_child_records_node =
            utils::insert_find_children_by_parent_node(graph, parent, parent_relation_field, finder)?;
        let update_node = update::update_record_node(graph, None, Arc::clone(child_model), data.try_into()?)?;
        let id_field = child_model.fields().id();
        // Wire check -> update: at execution time the ID produced by the
        // check becomes the update's record finder.
        graph.create_edge(
            &find_child_records_node,
            &update_node,
            QueryGraphDependency::ParentIds(Box::new(|mut node, mut parent_ids| {
                let parent_id = match parent_ids.pop() {
                    Some(pid) => Ok(pid),
                    None => Err(QueryGraphBuilderError::AssertionError(format!(
                        "Expected a valid parent ID to be present for nested update to-one case."
                    ))),
                }?;
                // Inject the found ID into the update query's `where_`.
                if let Node::Query(Query::Write(WriteQuery::UpdateRecord(ref mut ur))) = node {
                    ur.where_ = Some(RecordFinder {
                        field: id_field,
                        value: parent_id,
                    });
                }
                Ok(node)
            })),
        )?;
    }
    Ok(())
}
/// Handles nested updateMany cases: reads the connected children matching
/// the `where` filter, then applies an `UpdateManyRecords` restricted to the
/// IDs the read produced (intersected with the original filter).
pub fn connect_nested_update_many(
    graph: &mut QueryGraph,
    parent: &NodeRef,
    parent_relation_field: &RelationFieldRef,
    value: ParsedInputValue,
    child_model: &ModelRef,
) -> QueryGraphBuilderResult<()> {
    for value in utils::coerce_vec(value) {
        let mut map: ParsedInputMap = value.try_into()?;
        let where_arg = map.remove("where").unwrap();
        let data_value = map.remove("data").unwrap();
        let data_map: ParsedInputMap = data_value.try_into()?;
        let where_map: ParsedInputMap = where_arg.try_into()?;
        let filter = extract_filter(where_map, child_model)?;
        let update_args = WriteArguments::from(&child_model, data_map)?;
        // Read node: children of `parent` that also match the filter.
        let find_child_records_node =
            utils::insert_find_children_by_parent_node(graph, parent, parent_relation_field, filter.clone())?;
        // TODO: this looks like some duplication from write/update.rs
        let update_many = WriteQuery::UpdateManyRecords(UpdateManyRecords {
            model: Arc::clone(&child_model),
            filter,
            non_list_args: update_args.non_list,
            list_args: update_args.list,
        });
        let update_many_node = graph.create_node(Query::Write(update_many));
        let id_field = child_model.fields().id();
        // Wire read -> updateMany: narrow the update's filter to the IDs
        // actually returned by the read (AND of original filter + id IN ids).
        graph.create_edge(
            &find_child_records_node,
            &update_many_node,
            QueryGraphDependency::ParentIds(Box::new(move |mut node, parent_ids| {
                if let Node::Query(Query::Write(WriteQuery::UpdateManyRecords(ref mut ur))) = node {
                    let ids_filter = id_field.is_in(Some(parent_ids));
                    let new_filter = Filter::and(vec![ur.filter.clone(), ids_filter]);
                    ur.filter = new_filter;
                }
                Ok(node)
            })),
        )?;
    }
    Ok(())
}
| 35.2 | 110 | 0.569024 |
ef337e590ae76a1fe12ff6561db5782792879800 | 1,973 | #![warn(rust_2018_idioms)]
use tokio::prelude::*;
use tokio::time::*;
use std::sync::mpsc;
use std::time::{Duration, Instant};
/// A delay spawned on the threaded runtime must not resolve before its
/// deadline. The std mpsc channel blocks the test thread until the spawned
/// task has run its assertions.
#[test]
fn timer_with_threaded_runtime() {
    use tokio::runtime::Runtime;
    let rt = Runtime::new().unwrap();
    let (tx, rx) = mpsc::channel();
    rt.spawn(async move {
        let when = Instant::now() + Duration::from_millis(100);
        delay(when).await;
        // The timer may fire late but never early.
        assert!(Instant::now() >= when);
        tx.send(()).unwrap();
    });
    rx.recv().unwrap();
}
/// Same timer check as above, but on the single-threaded (current-thread)
/// runtime driven via `block_on`.
#[test]
fn timer_with_current_thread_runtime() {
    use tokio::runtime::Builder;
    let mut rt = Builder::new().current_thread().build().unwrap();
    let (tx, rx) = mpsc::channel();
    rt.block_on(async move {
        let when = Instant::now() + Duration::from_millis(100);
        tokio::time::delay(when).await;
        // The timer may fire late but never early.
        assert!(Instant::now() >= when);
        tx.send(()).unwrap();
    });
    rx.recv().unwrap();
}
/// Polls a delay as aggressively as possible: the `Starve` wrapper wakes
/// itself on every poll so the executor re-polls continuously. Even under
/// this constant polling the delay must not complete before its deadline.
#[tokio::test]
async fn starving() {
    use std::future::Future;
    use std::pin::Pin;
    use std::task::{Context, Poll};
    // Wraps an inner future and counts how many polls happen before it
    // resolves; the count is the wrapper's output.
    struct Starve<T: Future<Output = ()> + Unpin>(T, u64);
    impl<T: Future<Output = ()> + Unpin> Future for Starve<T> {
        type Output = u64;
        fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<u64> {
            if Pin::new(&mut self.0).poll(cx).is_ready() {
                return Poll::Ready(self.1);
            }
            self.1 += 1;
            // Immediately request another poll — this is the "starvation".
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }
    let when = Instant::now() + Duration::from_millis(20);
    let starve = Starve(delay(when), 0);
    starve.await;
    assert!(Instant::now() >= when);
}
/// A receiver whose sender never sends must time out: the result is an
/// error, and the timeout cannot fire before the full duration has passed.
#[tokio::test]
async fn timeout() {
    use tokio::sync::oneshot;
    // `_tx` is kept alive (not dropped) so the channel doesn't close early.
    let (_tx, rx) = oneshot::channel::<()>();
    let now = Instant::now();
    let dur = Duration::from_millis(20);
    let res = rx.timeout(dur).await;
    assert!(res.is_err());
    assert!(Instant::now() >= now + dur);
}
| 21.681319 | 78 | 0.551951 |
762375e3018a58ab8b686e4d92d380c113b943a6 | 11,345 | //! Utilities for structured reporting of experimental results.
//!
//! Experimental take on an API using RAII to report experimental results within context
//! somewhat isomorph to the callgraph and output everything as JSON.
//!
//! So far not really successful.
//! While it worked quite well for the CATCHUp experiments, the API is not really robust.
//! Keeping the ContextGuards around pollutes the algorithm code and is a bit error prone.
//! When used in a multithreaded environment, weird stuff will happen.
//! Not really ready for productive use.
//! JSON output is nice though.
use crate::built_info;
use serde_json::{Map, Value};
use std::{cell::RefCell, mem::swap};
pub use serde_json::json;
/// One saved frame of the reporting context stack. When descending into a
/// nested object/collection the previous context is pushed here so it can be
/// restored (and receive the nested value) on `pop_context`.
#[derive(Debug)]
enum ContextStackItem {
    /// The key under which the nested context will be inserted on pop.
    Key(String),
    /// A suspended collection context.
    Collection(Vec<Value>),
    /// A suspended object context.
    Object(Map<String, Value>),
    /// Marker frame pushed while reporting is blocked.
    Throwaway,
}
/// The context values are currently being reported into: a JSON object, a
/// JSON array, or nothing at all (`Throwaway`, used while reporting is
/// blocked — all reports are silently discarded).
#[derive(Debug)]
enum CurrentReportingContext {
    Collection(Vec<Value>),
    Object(Map<String, Value>),
    Throwaway,
}
/// Accumulates experiment results into a JSON tree roughly isomorphic to the
/// call graph: `current` is the node being written to, `context_stack` holds
/// the suspended ancestors.
#[derive(Debug)]
pub struct Reporter {
    current: CurrentReportingContext,
    context_stack: Vec<ContextStackItem>,
}
impl Default for Reporter {
    /// A fresh reporter: an empty root JSON object and no enclosing context.
    fn default() -> Self {
        Self {
            context_stack: Vec::new(),
            current: CurrentReportingContext::Object(Map::default()),
        }
    }
}
impl Reporter {
    /// Descend into a new (empty) object that will be stored under `key` of
    /// the current object once it is popped. No-op while blocked.
    fn create_object_under_key(&mut self, key: String) {
        match &mut self.current {
            CurrentReportingContext::Object(object) => {
                // Suspend the current object and remember the key; `current`
                // becomes the (now empty) swapped-out map, i.e. a fresh object.
                let mut tmp = Map::new();
                swap(&mut tmp, object);
                self.context_stack.push(ContextStackItem::Object(tmp));
                self.context_stack.push(ContextStackItem::Key(key));
            }
            CurrentReportingContext::Collection(_) => {
                panic!("Cannot create object at key in collection");
            }
            CurrentReportingContext::Throwaway => (),
        }
    }
    /// Descend into a new (empty) collection stored under `key` of the
    /// current object once it is popped. No-op while blocked.
    fn create_collection_under_key(&mut self, key: String) {
        match &mut self.current {
            CurrentReportingContext::Object(object) => {
                let mut tmp = Map::new();
                swap(&mut tmp, object);
                self.context_stack.push(ContextStackItem::Object(tmp));
                self.context_stack.push(ContextStackItem::Key(key));
                self.current = CurrentReportingContext::Collection(Vec::new());
            }
            CurrentReportingContext::Collection(_) => {
                panic!("Cannot create collection at key in collection");
            }
            CurrentReportingContext::Throwaway => (),
        }
    }
    /// Start a new object that will be appended to the current collection on
    /// pop. Only valid inside a collection context. No-op while blocked.
    fn create_collection_item(&mut self) {
        match &mut self.current {
            CurrentReportingContext::Object(_) => {
                panic!("Cannot create collection item in object");
            }
            CurrentReportingContext::Collection(collection) => {
                let mut tmp = Vec::new();
                swap(&mut tmp, collection);
                self.context_stack.push(ContextStackItem::Collection(tmp));
                self.current = CurrentReportingContext::Object(Map::new());
            }
            CurrentReportingContext::Throwaway => (),
        }
    }
    /// Suspend the current context and discard all reports until `unblock`.
    /// Nested blocks push an extra `Throwaway` marker so block/unblock pairs
    /// balance.
    fn block_reporting(&mut self) {
        match &mut self.current {
            CurrentReportingContext::Object(object) => {
                let mut tmp = Map::new();
                swap(&mut tmp, object);
                self.context_stack.push(ContextStackItem::Object(tmp));
                self.current = CurrentReportingContext::Throwaway;
            }
            CurrentReportingContext::Collection(collection) => {
                let mut tmp = Vec::new();
                swap(&mut tmp, collection);
                self.context_stack.push(ContextStackItem::Collection(tmp));
                self.current = CurrentReportingContext::Throwaway;
            }
            CurrentReportingContext::Throwaway => {
                self.context_stack.push(ContextStackItem::Throwaway);
            }
        }
    }
    /// Insert `val` under `key` in the current object. Panics in a
    /// collection context; no-op while blocked. Duplicate keys are rejected
    /// unless the `report-allow-override` feature is enabled.
    fn report(&mut self, key: String, val: Value) {
        match &mut self.current {
            CurrentReportingContext::Object(object) => {
                let prev = object.insert(key, val);
                if !cfg!(feature = "report-allow-override") {
                    assert!(prev.is_none());
                }
            }
            CurrentReportingContext::Collection(_) => {
                panic!("Cannot report value on collection");
            }
            CurrentReportingContext::Throwaway => (),
        }
    }
    /// Leave the current context: fold the finished value back into its
    /// parent (object-at-key or collection item) and make the parent current
    /// again. No-op while blocked.
    fn pop_context(&mut self) {
        if matches!(self.current, CurrentReportingContext::Throwaway) {
            return;
        }
        let parent = self.context_stack.pop().expect("tried to pop from empty context");
        match parent {
            // The context was created under a key: the frame below the key
            // must be the suspended parent object.
            ContextStackItem::Key(key) => {
                let parent = self.context_stack.pop().expect("tried to pop from empty context");
                if let ContextStackItem::Object(mut object) = parent {
                    let mut prev_current = CurrentReportingContext::Object(Default::default());
                    swap(&mut self.current, &mut prev_current);
                    let prev = match prev_current {
                        CurrentReportingContext::Object(cur_object) => object.insert(key, Value::Object(cur_object)),
                        CurrentReportingContext::Collection(collection) => object.insert(key, Value::Array(collection)),
                        CurrentReportingContext::Throwaway => None,
                    };
                    if !cfg!(feature = "report-allow-override") {
                        assert_eq!(prev, None);
                    }
                    self.current = CurrentReportingContext::Object(object);
                } else {
                    panic!("Inconsistent context stack");
                }
            }
            // The context was a collection item: append it to the suspended
            // collection and make the collection current again.
            ContextStackItem::Collection(mut collection) => {
                let mut prev_current = CurrentReportingContext::Object(Default::default());
                swap(&mut self.current, &mut prev_current);
                match prev_current {
                    CurrentReportingContext::Object(cur_object) => {
                        collection.push(Value::Object(cur_object));
                    }
                    CurrentReportingContext::Collection(_) => {
                        panic!("Cannot insert collection into collection");
                    }
                    CurrentReportingContext::Throwaway => panic!("Inconsistent context stack"),
                };
                self.current = CurrentReportingContext::Collection(collection);
            }
            _ => panic!("Inconsistent context stack"),
        }
    }
    /// Undo one `block_reporting`: restore the suspended context (or stay
    /// blocked if the popped frame is a nested `Throwaway` marker).
    fn unblock(&mut self) {
        if !matches!(self.current, CurrentReportingContext::Throwaway) {
            panic!("Inconsistent context stack");
        }
        match self.context_stack.pop().expect("tried to pop from empty context") {
            ContextStackItem::Key(_) => panic!("Inconsistent context stack"),
            ContextStackItem::Collection(collection) => {
                self.current = CurrentReportingContext::Collection(collection);
            }
            ContextStackItem::Object(object) => {
                self.current = CurrentReportingContext::Object(object);
            }
            ContextStackItem::Throwaway => (),
        }
    }
}
// Per-thread reporter; `None` until `enable_reporting` installs one, so all
// reporting calls are no-ops by default. NOTE(review): thread-local means
// contexts opened on one thread are invisible on another.
thread_local! {
    static REPORTER: RefCell<Option<Reporter>> = RefCell::new(None);
}
/// RAII guard for an object context; popping happens on drop.
#[must_use]
pub struct ContextGuard(());
impl Drop for ContextGuard {
    fn drop(&mut self) {
        REPORTER.with(|reporter| reporter.borrow_mut().as_mut().map(Reporter::pop_context));
    }
}
/// Open a nested object context stored under `key`; it stays open until the
/// returned guard is dropped.
pub fn push_context(key: String) -> ContextGuard {
    REPORTER.with(|reporter| reporter.borrow_mut().as_mut().map(|r| r.create_object_under_key(key)));
    ContextGuard(())
}
/// RAII guard for a collection context; popping happens on drop.
#[must_use]
pub struct CollectionContextGuard(());
impl Drop for CollectionContextGuard {
    fn drop(&mut self) {
        REPORTER.with(|reporter| reporter.borrow_mut().as_mut().map(Reporter::pop_context));
    }
}
/// Open a nested collection context stored under `key`; it stays open until
/// the returned guard is dropped.
pub fn push_collection_context(key: String) -> CollectionContextGuard {
    REPORTER.with(|reporter| reporter.borrow_mut().as_mut().map(|r| r.create_collection_under_key(key)));
    CollectionContextGuard(())
}
impl CollectionContextGuard {
    /// Open a new object item inside this collection. The `&mut self` borrow
    /// ties the item guard's lifetime to the collection guard.
    pub fn push_collection_item(&mut self) -> CollectionItemContextGuard {
        REPORTER.with(|reporter| reporter.borrow_mut().as_mut().map(Reporter::create_collection_item));
        CollectionItemContextGuard(self)
    }
}
/// RAII guard for one object item inside a collection context; the borrow of
/// the parent guard keeps the collection open while the item is being built.
#[must_use]
pub struct CollectionItemContextGuard<'a>(&'a CollectionContextGuard);
impl<'a> Drop for CollectionItemContextGuard<'a> {
    fn drop(&mut self) {
        REPORTER.with(|reporter| reporter.borrow_mut().as_mut().map(Reporter::pop_context));
    }
}
/// RAII guard for a span during which all reporting is discarded.
#[must_use]
pub struct BlockedReportingContextGuard();
impl Drop for BlockedReportingContextGuard {
    fn drop(&mut self) {
        REPORTER.with(|reporter| reporter.borrow_mut().as_mut().map(Reporter::unblock));
    }
}
/// Discard all reports until the returned guard is dropped.
pub fn block_reporting() -> BlockedReportingContextGuard {
    REPORTER.with(|reporter| reporter.borrow_mut().as_mut().map(|r| r.block_reporting()));
    BlockedReportingContextGuard()
}
/// Report a key/value pair on the current context and, when the
/// `report-to-stderr` feature is enabled, also echo it to stderr.
pub fn report(key: String, val: Value) {
    if cfg!(feature = "report-to-stderr") {
        // `Value` implements `Display`; formatting it directly avoids the
        // intermediate `String` that `val.to_string()` allocated (output is
        // byte-identical).
        eprintln!("{}: {}", key, val);
    }
    report_silent(key, val)
}
/// Report a key/value pair on the current context without any stderr echo.
/// A no-op when reporting has not been enabled on this thread.
pub fn report_silent(key: String, val: Value) {
    REPORTER.with(|reporter| reporter.borrow_mut().as_mut().map(|r| r.report(key, val)));
}
/// RAII guard returned by `enable_reporting`; on drop it prints the whole
/// accumulated JSON tree to stdout.
#[must_use]
pub struct ReportingGuard(());
impl Drop for ReportingGuard {
    fn drop(&mut self) {
        REPORTER.with(|reporter| {
            if let Some(r) = reporter.borrow_mut().as_mut() {
                // All nested contexts must have been popped by now.
                assert!(r.context_stack.is_empty());
                // Take the root object out of the reporter and serialize it.
                let mut current = CurrentReportingContext::Object(Default::default());
                swap(&mut current, &mut r.current);
                if let CurrentReportingContext::Object(object) = current {
                    println!("{}", Value::Object(object).to_string());
                } else {
                    panic!("broken root object for reporting");
                }
            };
        });
    }
}
/// Shorthand: `report!("key", json-literal)` — the value side accepts
/// anything the `json!` macro accepts.
#[macro_export]
macro_rules! report {
    ($k:expr, $($json:tt)+) => { report($k.to_string(), json!($($json)+)) };
}
/// Same as `report!` but never echoes to stderr.
#[macro_export]
macro_rules! report_silent {
    ($k:expr, $($json:tt)+) => { report_silent($k.to_string(), json!($($json)+)) };
}
/// Install a fresh reporter on the current thread and record standard run
/// metadata (build info, hostname, program name, start time, CLI args).
/// The returned guard prints the final JSON on drop.
pub fn enable_reporting(program: &str) -> ReportingGuard {
    REPORTER.with(|reporter| reporter.replace(Some(Reporter::default())));
    // Build metadata baked in by the `built` crate's generated module.
    report!("git_revision", built_info::GIT_VERSION.unwrap_or(""));
    report!("build_target", built_info::TARGET);
    report!("build_profile", built_info::PROFILE);
    report!("feature_flags", built_info::FEATURES_STR);
    report!("build_time", built_info::BUILT_TIME_UTC);
    report!("build_with_rustc", built_info::RUSTC_VERSION);
    // Best effort: skipped silently if the `hostname` command is unavailable.
    if let Ok(hostname) = std::process::Command::new("hostname").output() {
        report!("hostname", String::from_utf8(hostname.stdout).unwrap().trim());
    }
    report!("program", program);
    report!("start_time", chrono::prelude::Utc::now().to_rfc3339());
    report!("args", std::env::args().collect::<Vec<String>>());
    ReportingGuard(())
}
pub mod benchmark;
pub use benchmark::*;
| 35.232919 | 120 | 0.59621 |
875bc72f59712c9c390847e290e56fd1056014a8 | 2,289 | use crate::{
app_context::AppContext,
browser_states::BrowserState,
commands::Command,
errors::{ProgramError, TreeBuildError},
external::Launchable,
screens::Screen,
task_sync::TaskLifetime,
};
use std::io::Write;
/// Result of applying a command to a state
pub enum AppStateCmdResult {
    /// Exit the application.
    Quit,
    /// Stay on the current state unchanged.
    Keep,
    /// Run an external program.
    Launch(Box<Launchable>),
    /// Show an error message to the user.
    DisplayError(String),
    /// Push a new state, created by the given command.
    NewState(Box<dyn AppState>, Command),
    PopStateAndReapply, // the state asks the command be executed on a previous state
    /// Drop the current state and return to the previous one.
    PopState,
    /// Rebuild the current state, optionally clearing cached data.
    RefreshState { clear_cache: bool },
}
impl AppStateCmdResult {
pub fn verb_not_found(text: &str) -> AppStateCmdResult {
AppStateCmdResult::DisplayError(format!("verb not found: {:?}", &text))
}
pub fn from_optional_state(
os: Result<Option<BrowserState>, TreeBuildError>,
cmd: Command,
) -> AppStateCmdResult {
match os {
Ok(Some(os)) => AppStateCmdResult::NewState(Box::new(os), cmd),
Ok(None) => AppStateCmdResult::Keep,
Err(e) => AppStateCmdResult::DisplayError(e.to_string()),
}
}
}
impl From<Launchable> for AppStateCmdResult {
fn from(launchable: Launchable) -> Self {
AppStateCmdResult::Launch(Box::new(launchable))
}
}
/// a whole application state, stackable to allow reverting
/// to a previous one
pub trait AppState {
    /// Apply a (possibly partial) user command, returning what the app
    /// should do next.
    fn apply(
        &mut self,
        cmd: &mut Command,
        screen: &mut Screen,
        con: &AppContext,
    ) -> Result<AppStateCmdResult, ProgramError>;
    /// Whether the verb at `verb_index` can currently be executed.
    fn can_execute(&self, verb_index: usize, con: &AppContext) -> bool;
    /// Rebuild the state (e.g. after a resize or data change) and return the
    /// command that should be re-applied afterwards.
    fn refresh(&mut self, screen: &Screen, con: &AppContext) -> Command;
    /// Make progress on the state's background task, if any. `tl` lets the
    /// task be interrupted.
    fn do_pending_task(&mut self, screen: &mut Screen, tl: &TaskLifetime);
    /// True while a background task still needs `do_pending_task` calls.
    fn has_pending_task(&self) -> bool;
    /// Draw the state's main content.
    fn display(
        &mut self,
        w: &mut dyn Write,
        screen: &Screen,
        con: &AppContext,
    ) -> Result<(), ProgramError>;
    /// Draw the flags area.
    fn write_flags(
        &self,
        w: &mut dyn Write,
        screen: &mut Screen,
        con: &AppContext,
    ) -> Result<(), ProgramError>;
    /// Draw the status line for the current (possibly partial) command.
    fn write_status(
        &self,
        w: &mut dyn Write,
        cmd: &Command,
        screen: &Screen,
        con: &AppContext,
    ) -> Result<(), ProgramError>;
}
| 26.616279 | 85 | 0.613368 |
56eccc6076dc094fd94f50db6af494f2f5f810e3 | 4,122 | /*
* Copyright 2019 The Starlark in Rust Authors.
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::{
analysis::types::{LintT, LintWarning},
codemap::{CodeMap, Span, SpanLoc},
syntax::{
ast::{AstExpr, AstLiteral, Expr},
AstModule,
},
};
use gazebo::variants::VariantName;
use std::collections::HashMap;
use thiserror::Error;
/// Warnings produced by the "dubious" lint pass.
#[derive(Error, Debug, VariantName)]
pub(crate) enum Dubious {
    /// A dictionary literal repeats a key; fields: the key's display form
    /// and the location of the other (duplicating) use.
    #[error("Duplicate dictionary key `{}`, also used at {}", .0, .1)]
    DuplicateKey(String, SpanLoc),
}
impl LintWarning for Dubious {
    // Every dubious warning is treated as serious (likely a real bug).
    fn is_serious(&self) -> bool {
        true
    }
}
// Go implementation of Starlark disallows duplicate top-level assignments,
// it's likely that will become Starlark standard sooner or later, so check now.
// The one place we allow it is to export something you grabbed with load.
/// Walks every expression in `module` and reports dictionary literals whose
/// keys repeat. Only keys whose value can't change between evaluations are
/// compared: int/string literals and plain identifiers. Function-call keys
/// are skipped since each call may yield a different value.
fn duplicate_dictionary_key(module: &AstModule, res: &mut Vec<LintT<Dubious>>) {
    // Canonical comparable form of a dictionary key.
    #[derive(PartialEq, Eq, Hash)]
    enum Key<'a> {
        Int(i32),
        String(&'a str),
        Identifier(&'a str),
    }
    // Returns the comparable key and its span, or None for non-constant keys.
    fn to_key<'a>(x: &'a AstExpr) -> Option<(Key<'a>, Span)> {
        match &**x {
            Expr::Literal(x) => match &*x {
                AstLiteral::IntLiteral(x) => Some((Key::Int(x.node), x.span)),
                AstLiteral::StringLiteral(x) => Some((Key::String(&x.node), x.span)),
            },
            Expr::Identifier(x) => Some((Key::Identifier(&x.node), x.span)),
            _ => None,
        }
    }
    // Recursively checks one expression, then all of its children.
    fn expr<'a>(x: &'a AstExpr, codemap: &CodeMap, res: &mut Vec<LintT<Dubious>>) {
        match &**x {
            Expr::Dict(args) => {
                // Map from key to the span of its most recent occurrence;
                // `insert` returning Some means a duplicate was seen.
                let mut seen = HashMap::new();
                for (key, _) in args {
                    if let Some((key_id, pos)) = to_key(key) {
                        if let Some(old) = seen.insert(key_id, pos) {
                            // Warning is anchored on the earlier occurrence,
                            // with the later one's location in the message.
                            res.push(LintT::new(
                                codemap,
                                old,
                                Dubious::DuplicateKey(key.to_string(), codemap.look_up_span(pos)),
                            ))
                        }
                    }
                }
            }
            _ => {}
        }
        x.visit_expr(|x| expr(x, codemap, res));
    }
    module
        .statement
        .visit_expr(|x| expr(x, &module.codemap, res))
}
/// Run all "dubious" lint checks over `module` and collect the warnings.
pub(crate) fn dubious(module: &AstModule) -> Vec<LintT<Dubious>> {
    let mut warnings = Vec::new();
    duplicate_dictionary_key(module, &mut warnings);
    warnings
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::syntax::Dialect;
    use gazebo::prelude::*;
    /// Parse a snippet as a module named "X" under the extended dialect.
    fn module(x: &str) -> AstModule {
        AstModule::parse("X", x.to_owned(), &Dialect::Extended).unwrap()
    }
    impl Dubious {
        /// Test helper: the key text of a `DuplicateKey` warning.
        fn about(&self) -> &String {
            match self {
                Dubious::DuplicateKey(x, _) => x,
            }
        }
    }
    /// Literal, identifier and mixed-key duplicates are reported (once per
    /// extra occurrence); function-call keys are not linted.
    #[test]
    fn test_lint_duplicate_keys() {
        let m = module(
            r#"
{'no1': 1, 'no1': 2}
{42: 1, 78: 9, 'no2': 100, 42: 6, 'no2': 8}
# Variables can't change as a result of expression evaluation,
# so it's always an error if you see the same expression
{no3: 1, no4: 2, yes: 3, no3: 1, no3: 3, no4: 8}
# Functions can change each time round, so don't lint on them.
{f(): 1, f(): 2}
"#,
        );
        let mut res = Vec::new();
        duplicate_dictionary_key(&m, &mut res);
        assert_eq!(
            res.map(|x| x.problem.about()),
            &["\"no1\"", "42", "\"no2\"", "no3", "no3", "no4"]
        );
    }
}
| 30.087591 | 98 | 0.543426 |
891dba1e96cb8321220c824c71ebc0779e0c922d | 19,615 | mod atlas;
#[cfg(feature = "image")]
mod raster;
#[cfg(feature = "svg")]
mod vector;
use crate::Transformation;
use atlas::Atlas;
use iced_graphics::layer;
use iced_native::Rectangle;
use std::cell::RefCell;
use std::mem;
use zerocopy::AsBytes;
#[cfg(feature = "image")]
use iced_native::image;
#[cfg(feature = "svg")]
use iced_native::svg;
/// GPU pipeline that batches raster/vector images into instanced quads
/// sampled from a shared texture atlas.
#[derive(Debug)]
pub struct Pipeline {
    /// CPU-side cache of decoded raster images.
    #[cfg(feature = "image")]
    raster_cache: RefCell<raster::Cache>,
    /// CPU-side cache of rasterized SVGs (per size/scale).
    #[cfg(feature = "svg")]
    vector_cache: RefCell<vector::Cache>,
    pipeline: wgpu::RenderPipeline,
    /// Uniform buffer holding the projection transform.
    uniforms: wgpu::Buffer,
    /// Static unit-quad vertices / indices shared by every instance.
    vertices: wgpu::Buffer,
    indices: wgpu::Buffer,
    /// Per-image instance data, rewritten every draw.
    instances: wgpu::Buffer,
    constants: wgpu::BindGroup,
    /// Bind group exposing the atlas texture; recreated when the atlas grows.
    texture: wgpu::BindGroup,
    /// Atlas layer count at the time `texture` was created, used to detect growth.
    texture_version: usize,
    texture_layout: wgpu::BindGroupLayout,
    texture_atlas: Atlas,
}
impl Pipeline {
    /// Builds the image pipeline targeting the given output `format`:
    /// sampler + uniforms (group 0), atlas texture (group 1), the render
    /// pipeline, and the static quad / instance buffers.
    pub fn new(device: &wgpu::Device, format: wgpu::TextureFormat) -> Self {
        use wgpu::util::DeviceExt;
        // Linear-filtered, clamped sampler shared by all atlas lookups.
        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            mipmap_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });
        // Group 0: binding 0 = uniforms (vertex stage), binding 1 = sampler
        // (fragment stage).
        let constant_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("iced_wgpu::image constants layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStage::VERTEX,
                        ty: wgpu::BindingType::UniformBuffer {
                            dynamic: false,
                            min_binding_size: wgpu::BufferSize::new(
                                mem::size_of::<Uniforms>() as u64,
                            ),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStage::FRAGMENT,
                        ty: wgpu::BindingType::Sampler { comparison: false },
                        count: None,
                    },
                ],
            });
        let uniforms_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("iced_wgpu::image uniforms buffer"),
            size: mem::size_of::<Uniforms>() as u64,
            usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
            mapped_at_creation: false,
        });
        let constant_bind_group =
            device.create_bind_group(&wgpu::BindGroupDescriptor {
                label: Some("iced_wgpu::image constants bind group"),
                layout: &constant_layout,
                entries: &[
                    wgpu::BindGroupEntry {
                        binding: 0,
                        resource: wgpu::BindingResource::Buffer(
                            uniforms_buffer.slice(..),
                        ),
                    },
                    wgpu::BindGroupEntry {
                        binding: 1,
                        resource: wgpu::BindingResource::Sampler(&sampler),
                    },
                ],
            });
        // Group 1: the atlas texture (fragment stage only). The matching
        // bind group is recreated whenever the atlas grows (see `draw`).
        let texture_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("iced_wgpu::image texture atlas layout"),
                entries: &[wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStage::FRAGMENT,
                    ty: wgpu::BindingType::SampledTexture {
                        dimension: wgpu::TextureViewDimension::D2,
                        component_type: wgpu::TextureComponentType::Float,
                        multisampled: false,
                    },
                    count: None,
                }],
            });
        let layout =
            device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some("iced_wgpu::image pipeline layout"),
                push_constant_ranges: &[],
                bind_group_layouts: &[&constant_layout, &texture_layout],
            });
        let vs_module = device.create_shader_module(wgpu::include_spirv!(
            "shader/image.vert.spv"
        ));
        let fs_module = device.create_shader_module(wgpu::include_spirv!(
            "shader/image.frag.spv"
        ));
        let pipeline =
            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some("iced_wgpu::image pipeline"),
                layout: Some(&layout),
                vertex_stage: wgpu::ProgrammableStageDescriptor {
                    module: &vs_module,
                    entry_point: "main",
                },
                fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
                    module: &fs_module,
                    entry_point: "main",
                }),
                rasterization_state: Some(wgpu::RasterizationStateDescriptor {
                    front_face: wgpu::FrontFace::Cw,
                    cull_mode: wgpu::CullMode::None,
                    ..Default::default()
                }),
                primitive_topology: wgpu::PrimitiveTopology::TriangleList,
                // Premultiplied-alpha style blending over the existing frame.
                color_states: &[wgpu::ColorStateDescriptor {
                    format,
                    color_blend: wgpu::BlendDescriptor {
                        src_factor: wgpu::BlendFactor::SrcAlpha,
                        dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                        operation: wgpu::BlendOperation::Add,
                    },
                    alpha_blend: wgpu::BlendDescriptor {
                        src_factor: wgpu::BlendFactor::One,
                        dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                        operation: wgpu::BlendOperation::Add,
                    },
                    write_mask: wgpu::ColorWrite::ALL,
                }],
                depth_stencil_state: None,
                // Slot 0: per-vertex quad corner. Slot 1: per-instance
                // position/size, atlas position/scale and layer index.
                vertex_state: wgpu::VertexStateDescriptor {
                    index_format: wgpu::IndexFormat::Uint16,
                    vertex_buffers: &[
                        wgpu::VertexBufferDescriptor {
                            stride: mem::size_of::<Vertex>() as u64,
                            step_mode: wgpu::InputStepMode::Vertex,
                            attributes: &[wgpu::VertexAttributeDescriptor {
                                shader_location: 0,
                                format: wgpu::VertexFormat::Float2,
                                offset: 0,
                            }],
                        },
                        wgpu::VertexBufferDescriptor {
                            stride: mem::size_of::<Instance>() as u64,
                            step_mode: wgpu::InputStepMode::Instance,
                            attributes: &[
                                wgpu::VertexAttributeDescriptor {
                                    shader_location: 1,
                                    format: wgpu::VertexFormat::Float2,
                                    offset: 0,
                                },
                                wgpu::VertexAttributeDescriptor {
                                    shader_location: 2,
                                    format: wgpu::VertexFormat::Float2,
                                    offset: 4 * 2,
                                },
                                wgpu::VertexAttributeDescriptor {
                                    shader_location: 3,
                                    format: wgpu::VertexFormat::Float2,
                                    offset: 4 * 4,
                                },
                                wgpu::VertexAttributeDescriptor {
                                    shader_location: 4,
                                    format: wgpu::VertexFormat::Float2,
                                    offset: 4 * 6,
                                },
                                wgpu::VertexAttributeDescriptor {
                                    shader_location: 5,
                                    format: wgpu::VertexFormat::Uint,
                                    offset: 4 * 8,
                                },
                            ],
                        },
                    ],
                },
                sample_count: 1,
                sample_mask: !0,
                alpha_to_coverage_enabled: false,
            });
        // Static unit quad shared by every instance.
        let vertices =
            device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
                label: Some("iced_wgpu::image vertex buffer"),
                contents: QUAD_VERTS.as_bytes(),
                usage: wgpu::BufferUsage::VERTEX,
            });
        let indices =
            device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
                label: Some("iced_wgpu::image index buffer"),
                contents: QUAD_INDICES.as_bytes(),
                usage: wgpu::BufferUsage::INDEX,
            });
        // Pre-sized for up to Instance::MAX instances per draw chunk.
        let instances = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("iced_wgpu::image instance buffer"),
            size: mem::size_of::<Instance>() as u64 * Instance::MAX as u64,
            usage: wgpu::BufferUsage::VERTEX | wgpu::BufferUsage::COPY_DST,
            mapped_at_creation: false,
        });
        let texture_atlas = Atlas::new(device);
        let texture = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("iced_wgpu::image texture atlas bind group"),
            layout: &texture_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: wgpu::BindingResource::TextureView(
                    &texture_atlas.view(),
                ),
            }],
        });
        Pipeline {
            #[cfg(feature = "image")]
            raster_cache: RefCell::new(raster::Cache::new()),
            #[cfg(feature = "svg")]
            vector_cache: RefCell::new(vector::Cache::new()),
            pipeline,
            uniforms: uniforms_buffer,
            vertices,
            indices,
            instances,
            constants: constant_bind_group,
            texture,
            texture_version: texture_atlas.layer_count(),
            texture_layout,
            texture_atlas,
        }
    }
#[cfg(feature = "image")]
pub fn dimensions(&self, handle: &image::Handle) -> (u32, u32) {
let mut cache = self.raster_cache.borrow_mut();
let memory = cache.load(&handle);
memory.dimensions()
}
#[cfg(feature = "svg")]
pub fn viewport_dimensions(&self, handle: &svg::Handle) -> (u32, u32) {
let mut cache = self.vector_cache.borrow_mut();
let svg = cache.load(&handle);
svg.viewport_dimensions()
}
    /// Uploads any not-yet-cached images into the atlas, then renders all
    /// `images` as instanced quads into `target`, scissored to `bounds`.
    pub fn draw(
        &mut self,
        device: &wgpu::Device,
        staging_belt: &mut wgpu::util::StagingBelt,
        encoder: &mut wgpu::CommandEncoder,
        images: &[layer::Image],
        transformation: Transformation,
        bounds: Rectangle<u32>,
        target: &wgpu::TextureView,
        _scale: f32,
    ) {
        let instances: &mut Vec<Instance> = &mut Vec::new();
        #[cfg(feature = "image")]
        let mut raster_cache = self.raster_cache.borrow_mut();
        #[cfg(feature = "svg")]
        let mut vector_cache = self.vector_cache.borrow_mut();
        // Phase 1: make sure every image lives in the atlas and build the
        // per-image instance list.
        for image in images {
            match &image {
                #[cfg(feature = "image")]
                layer::Image::Raster { handle, bounds } => {
                    if let Some(atlas_entry) = raster_cache.upload(
                        handle,
                        device,
                        encoder,
                        &mut self.texture_atlas,
                    ) {
                        add_instances(
                            [bounds.x, bounds.y],
                            [bounds.width, bounds.height],
                            atlas_entry,
                            instances,
                        );
                    }
                }
                #[cfg(not(feature = "image"))]
                layer::Image::Raster { .. } => {}
                #[cfg(feature = "svg")]
                layer::Image::Vector { handle, bounds } => {
                    let size = [bounds.width, bounds.height];
                    // SVGs are rasterized at the target size and scale.
                    if let Some(atlas_entry) = vector_cache.upload(
                        handle,
                        size,
                        _scale,
                        device,
                        encoder,
                        &mut self.texture_atlas,
                    ) {
                        add_instances(
                            [bounds.x, bounds.y],
                            size,
                            atlas_entry,
                            instances,
                        );
                    }
                }
                #[cfg(not(feature = "svg"))]
                layer::Image::Vector { .. } => {}
            }
        }
        if instances.is_empty() {
            return;
        }
        // The uploads above may have grown the atlas; its texture view is
        // then new, so the bind group must be rebuilt.
        let texture_version = self.texture_atlas.layer_count();
        if self.texture_version != texture_version {
            log::info!("Atlas has grown. Recreating bind group...");
            self.texture =
                device.create_bind_group(&wgpu::BindGroupDescriptor {
                    label: Some("iced_wgpu::image texture atlas bind group"),
                    layout: &self.texture_layout,
                    entries: &[wgpu::BindGroupEntry {
                        binding: 0,
                        resource: wgpu::BindingResource::TextureView(
                            &self.texture_atlas.view(),
                        ),
                    }],
                });
            self.texture_version = texture_version;
        }
        // Upload the projection transform through the staging belt.
        {
            let mut uniforms_buffer = staging_belt.write_buffer(
                encoder,
                &self.uniforms,
                0,
                wgpu::BufferSize::new(mem::size_of::<Uniforms>() as u64)
                    .unwrap(),
                device,
            );
            uniforms_buffer.copy_from_slice(
                Uniforms {
                    transform: transformation.into(),
                }
                .as_bytes(),
            );
        }
        // Phase 2: draw in chunks of at most Instance::MAX instances, the
        // capacity of the instance buffer.
        let mut i = 0;
        let total = instances.len();
        while i < total {
            let end = (i + Instance::MAX).min(total);
            let amount = end - i;
            let mut instances_buffer = staging_belt.write_buffer(
                encoder,
                &self.instances,
                0,
                wgpu::BufferSize::new(
                    (amount * std::mem::size_of::<Instance>()) as u64,
                )
                .unwrap(),
                device,
            );
            instances_buffer
                .copy_from_slice(instances[i..i + amount].as_bytes());
            // Load (not clear): images composite over prior layers.
            let mut render_pass =
                encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                    color_attachments: &[
                        wgpu::RenderPassColorAttachmentDescriptor {
                            attachment: target,
                            resolve_target: None,
                            ops: wgpu::Operations {
                                load: wgpu::LoadOp::Load,
                                store: true,
                            },
                        },
                    ],
                    depth_stencil_attachment: None,
                });
            render_pass.set_pipeline(&self.pipeline);
            render_pass.set_bind_group(0, &self.constants, &[]);
            render_pass.set_bind_group(1, &self.texture, &[]);
            render_pass.set_index_buffer(self.indices.slice(..));
            render_pass.set_vertex_buffer(0, self.vertices.slice(..));
            render_pass.set_vertex_buffer(1, self.instances.slice(..));
            render_pass.set_scissor_rect(
                bounds.x,
                bounds.y,
                bounds.width,
                bounds.height,
            );
            render_pass.draw_indexed(
                0..QUAD_INDICES.len() as u32,
                0,
                0..amount as u32,
            );
            i += Instance::MAX;
        }
    }
    /// Forwards a trim to the raster and vector caches so they can release
    /// texture-atlas allocations they no longer need (the eviction policy
    /// itself lives inside the caches).
    pub fn trim_cache(&mut self) {
        #[cfg(feature = "image")]
        self.raster_cache.borrow_mut().trim(&mut self.texture_atlas);
        #[cfg(feature = "svg")]
        self.vector_cache.borrow_mut().trim(&mut self.texture_atlas);
    }
}
/// A single vertex of the unit quad fed to the vertex shader.
#[repr(C)]
#[derive(Clone, Copy, AsBytes)]
pub struct Vertex {
    // Corner position within the unit square; see `QUAD_VERTS`.
    _position: [f32; 2],
}
// Index list drawing the unit quad as two triangles (0-1-2 and 0-2-3).
const QUAD_INDICES: [u16; 6] = [0, 1, 2, 0, 2, 3];
// The four corners of the unit square; every image is drawn by scaling
// and translating this quad per instance.
const QUAD_VERTS: [Vertex; 4] = [
    Vertex {
        _position: [0.0, 0.0],
    },
    Vertex {
        _position: [1.0, 0.0],
    },
    Vertex {
        _position: [1.0, 1.0],
    },
    Vertex {
        _position: [0.0, 1.0],
    },
];
/// Per-image instance data: where to draw the quad and which region of
/// the texture atlas to sample.
#[repr(C)]
#[derive(Debug, Clone, Copy, AsBytes)]
struct Instance {
    // Position of the quad on screen (taken from the image bounds).
    _position: [f32; 2],
    // Size of the quad.
    _size: [f32; 2],
    // Normalized position of the sampled region within an atlas layer.
    _position_in_atlas: [f32; 2],
    // Normalized size of the sampled region within an atlas layer.
    _size_in_atlas: [f32; 2],
    // Index of the atlas array layer to sample from.
    _layer: u32,
}
impl Instance {
    /// Maximum number of instances uploaded and drawn per batch in `draw`.
    pub const MAX: usize = 1_000;
}
/// Uniform data shared by all instances of a draw call.
#[repr(C)]
#[derive(Debug, Clone, Copy, AsBytes)]
struct Uniforms {
    // 4x4 transformation matrix, flattened (built from `Transformation`).
    transform: [f32; 16],
}
/// Pushes one `Instance` per atlas allocation backing `entry`.
///
/// A contiguous entry becomes a single quad covering the whole image. A
/// fragmented entry (an image too big for one allocation) becomes one
/// quad per fragment, with each fragment's position and size scaled from
/// atlas texels into the requested image dimensions.
fn add_instances(
    image_position: [f32; 2],
    image_size: [f32; 2],
    entry: &atlas::Entry,
    instances: &mut Vec<Instance>,
) {
    match entry {
        atlas::Entry::Contiguous(allocation) => {
            add_instance(image_position, image_size, allocation, instances);
        }
        atlas::Entry::Fragmented { fragments, size } => {
            // Scale factors from atlas texels to image coordinates.
            let scale_x = image_size[0] / size.0 as f32;
            let scale_y = image_size[1] / size.1 as f32;

            for fragment in fragments {
                let (offset_x, offset_y) = fragment.position;
                let (width, height) = fragment.allocation.size();

                let fragment_position = [
                    image_position[0] + offset_x as f32 * scale_x,
                    image_position[1] + offset_y as f32 * scale_y,
                ];
                let fragment_size =
                    [width as f32 * scale_x, height as f32 * scale_y];

                add_instance(
                    fragment_position,
                    fragment_size,
                    &fragment.allocation,
                    instances,
                );
            }
        }
    }
}
/// Builds a single `Instance` from an atlas allocation and appends it.
///
/// Atlas coordinates are normalized by `atlas::SIZE`. The +0.5 origin
/// offset and -1.0 extent adjustment look like a half-texel inset —
/// presumably to avoid sampling bleed from neighboring atlas entries;
/// confirm against the sampler configuration.
#[inline]
fn add_instance(
    position: [f32; 2],
    size: [f32; 2],
    allocation: &atlas::Allocation,
    instances: &mut Vec<Instance>,
) {
    let (x, y) = allocation.position();
    let (width, height) = allocation.size();
    let atlas_size = atlas::SIZE as f32;

    instances.push(Instance {
        _position: position,
        _size: size,
        _position_in_atlas: [
            (x as f32 + 0.5) / atlas_size,
            (y as f32 + 0.5) / atlas_size,
        ],
        _size_in_atlas: [
            (width as f32 - 1.0) / atlas_size,
            (height as f32 - 1.0) / atlas_size,
        ],
        _layer: allocation.layer() as u32,
    });
}
| 34.291958 | 78 | 0.465817 |
3a55397c391034fe392360baeb5b5faab319405e | 1,096 | // SPDX-License-Identifier: Apache-2.0
/// A fixed-arity pack of raw `usize` arguments.
///
/// Converting an `Argv<N>` (for `N` in `0..=6`) into `[usize; 6]` keeps
/// the `N` provided values in order and pads the remaining trailing slots
/// with zeroes.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct Argv<const N: usize>(pub [usize; N]);

impl From<Argv<0>> for [usize; 6] {
    #[inline]
    fn from(_: Argv<0>) -> Self {
        [0; 6]
    }
}

impl From<Argv<1>> for [usize; 6] {
    #[inline]
    fn from(Argv([a]): Argv<1>) -> Self {
        [a, 0, 0, 0, 0, 0]
    }
}

impl From<Argv<2>> for [usize; 6] {
    #[inline]
    fn from(Argv([a, b]): Argv<2>) -> Self {
        [a, b, 0, 0, 0, 0]
    }
}

impl From<Argv<3>> for [usize; 6] {
    #[inline]
    fn from(Argv([a, b, c]): Argv<3>) -> Self {
        [a, b, c, 0, 0, 0]
    }
}

impl From<Argv<4>> for [usize; 6] {
    #[inline]
    fn from(Argv([a, b, c, d]): Argv<4>) -> Self {
        [a, b, c, d, 0, 0]
    }
}

impl From<Argv<5>> for [usize; 6] {
    #[inline]
    fn from(Argv([a, b, c, d, e]): Argv<5>) -> Self {
        [a, b, c, d, e, 0]
    }
}

impl From<Argv<6>> for [usize; 6] {
    #[inline]
    fn from(Argv(args): Argv<6>) -> Self {
        args
    }
}
| 20.296296 | 66 | 0.486314 |
50893990a3957d92dd1d895cc54e285e53834866 | 1,012 | use std::default::Default;
use html5ever::parse_document;
use html5ever::rcdom::RcDom;
use html5ever::tendril::TendrilSink;
use std::io::Cursor;
use hierarchy::Hierarchy;
use super::walker::Walker;
use index::Index;
use dom::Dom;
/// Parses an HTML string into a `Dom`.
///
/// Runs html5ever over the input bytes, then walks the resulting `RcDom`
/// with a `Walker` to build the hierarchy and index.
pub fn parse(s: &str) -> Dom {
    let mut input = Cursor::new(s.as_bytes());

    // In-memory cursor reads cannot fail in practice, hence the unwrap.
    let rc_dom = parse_document(RcDom::default(), Default::default())
        .from_utf8()
        .read_from(&mut input)
        .unwrap();

    let mut walker = Walker::new(Hierarchy::new(), Index::new());
    walker.walk(rc_dom.document, None);
    walker.into_dom()
}
#[cfg(test)]
mod tests {
    use tendril::{SliceExt, StrTendril};
    /// Checks that `StrTendril` equality holds regardless of how the
    /// tendril was built (`From<&str>` vs `to_tendril`), for both a short
    /// and a longer string — presumably exercising the inline and
    /// heap-backed representations; confirm against tendril's threshold.
    #[test]
    fn test_tendril_equal() {
        let short1: StrTendril = "a123bc".into();
        let short2: StrTendril = "a123bc".to_tendril();
        assert_eq!(short1, short2);
        let long1: StrTendril = "a123bc111111111111111111".into();
        let long2: StrTendril = "a123bc111111111111111111".to_tendril();
        assert_eq!(long1, long2);
    }
}
| 25.3 | 72 | 0.641304 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.