hexsha
stringlengths 40
40
| size
int64 4
1.05M
| content
stringlengths 4
1.05M
| avg_line_length
float64 1.33
100
| max_line_length
int64 1
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
626cc98d5ca2157a63ea559e886519251ba1aaef | 4,931 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Auxiliary module for configuring the logger.
extern crate serde_json;
use libc::O_NONBLOCK;
use std::fmt::{Display, Formatter};
use std::fs::{File, OpenOptions};
use std::io::{LineWriter, Write};
use std::os::unix::fs::OpenOptionsExt;
use std::path::PathBuf;
use std::sync::{Mutex, MutexGuard};
use self::serde_json::Value;
type Result<T> = std::result::Result<T, std::io::Error>;
/// Structure `LoggerWriter` used for writing to a FIFO.
pub struct LoggerWriter {
    // Line-buffered writer over the opened FIFO, guarded by a mutex so the
    // logger can be shared between threads.
    line_writer: Mutex<LineWriter<File>>,
}
impl LoggerWriter {
    /// Create and open a FIFO for writing to it.
    /// In order to not block the instance if nobody is consuming the logs that are flushed to the
    /// two pipes, we are opening them with `O_NONBLOCK` flag. In this case, writing to a pipe will
    /// start failing when reaching 64K of unconsumed content. Simultaneously,
    /// the `missed_metrics_count` metric will get increased.
    ///
    pub fn new(fifo_path: &str) -> Result<LoggerWriter> {
        let fifo = PathBuf::from(fifo_path);
        // NOTE(review): the FIFO is opened read+write -- presumably so the
        // nonblocking open cannot fail before a consumer attaches; confirm.
        OpenOptions::new()
            .custom_flags(O_NONBLOCK)
            .read(true)
            .write(true)
            .open(&fifo)
            .map(|t| LoggerWriter {
                line_writer: Mutex::new(LineWriter::new(t)),
            })
    }
    /// Returns the guarded `LineWriter`, recovering the guard even when the
    /// mutex was poisoned by a panicking thread.
    fn get_line_writer(&self) -> MutexGuard<LineWriter<File>> {
        match self.line_writer.lock() {
            Ok(guard) => guard,
            // If a thread panics while holding this lock, the writer within should still be usable.
            // (we might get an incomplete log line or something like that).
            Err(poisoned) => poisoned.into_inner(),
        }
    }
}
impl Write for LoggerWriter {
    /// Writes `msg` in full under the writer lock.
    ///
    /// Delegates to `write_all` so this layer never reports a partial write;
    /// on success the whole `msg.len()` is returned as written.
    // Fix: return type was `Result<(usize)>` -- the parentheses are redundant
    // and trigger the `unused_parens` lint.
    fn write(&mut self, msg: &[u8]) -> Result<usize> {
        let mut line_writer = self.get_line_writer();
        line_writer.write_all(msg).map(|()| msg.len())
    }

    /// Flushes any bytes still buffered by the inner `LineWriter`.
    fn flush(&mut self) -> Result<()> {
        let mut line_writer = self.get_line_writer();
        line_writer.flush()
    }
}
/// Enum used for setting the log level.
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub enum LoggerLevel {
    /// When the level is set to `Error`, the logger will only contain entries
    /// that come from the `error` macro.
    Error,
    /// When the level is set to `Warning`, the logger will only contain entries
    /// that come from the `error` and `warn` macros.
    Warning,
    /// When the level is set to `Info`, the logger will only contain entries
    /// that come from the `error`, `warn` and `info` macros.
    Info,
    /// When the level is set to `Debug`, the logger will contain entries that
    /// come from all macros; this is the most verbose log level.
    Debug,
}
/// Strongly typed structure used to describe the logger.
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
#[serde(deny_unknown_fields)]
pub struct LoggerConfig {
    /// Named pipe used as output for logs.
    pub log_fifo: String,
    /// Named pipe used as output for metrics.
    pub metrics_fifo: String,
    /// The level of the Logger. Defaults to `Warning` when absent.
    #[serde(default = "default_level")]
    pub level: LoggerLevel,
    /// When enabled, the logger will append to the output the severity of the log entry.
    /// Defaults to `false`.
    #[serde(default)]
    pub show_level: bool,
    /// When enabled, the logger will append the origin of the log entry.
    /// Defaults to `false`.
    #[serde(default)]
    pub show_log_origin: bool,
    /// Additional logging options. Only present on x86_64 builds; defaults to
    /// an empty JSON array.
    #[cfg(target_arch = "x86_64")]
    #[serde(default = "default_log_options")]
    pub options: Value,
}
/// Serde default for `LoggerConfig::level` when the field is absent.
fn default_level() -> LoggerLevel {
    LoggerLevel::Warning
}
/// Serde default for `LoggerConfig::options`: an empty JSON array.
fn default_log_options() -> Value {
    Value::Array(vec![])
}
/// Errors associated with actions on the `LoggerConfig`.
#[derive(Debug)]
pub enum LoggerConfigError {
    /// Cannot initialize the logger due to bad user input.
    /// The wrapped string is the underlying error message.
    InitializationFailure(String),
    /// Cannot flush the metrics.
    /// The wrapped string is the underlying error message.
    FlushMetrics(String),
}
impl Display for LoggerConfigError {
    /// Renders the wrapped message with all double quotes stripped.
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        use self::LoggerConfigError::*;
        // Every variant carries a plain message string and is displayed the
        // same way, so a single or-pattern arm covers them all.
        match *self {
            InitializationFailure(ref err_msg) | FlushMetrics(ref err_msg) => {
                write!(f, "{}", err_msg.replace("\"", ""))
            }
        }
    }
}
#[cfg(test)]
mod tests {
    extern crate tempfile;
    use self::tempfile::NamedTempFile;
    use super::*;
    #[test]
    fn test_log_writer() {
        // NOTE(review): `NamedTempFile` is a regular file, not a FIFO, so this
        // exercises the open/write/flush path but not the nonblocking-pipe
        // behavior described on `LoggerWriter::new`.
        let log_file_temp =
            NamedTempFile::new().expect("Failed to create temporary output logging file.");
        let good_file = String::from(log_file_temp.path().to_path_buf().to_str().unwrap());
        let res = LoggerWriter::new(&good_file);
        assert!(res.is_ok());
        let mut fw = res.unwrap();
        let msg = String::from("some message");
        assert!(fw.write(&msg.as_bytes()).is_ok());
        assert!(fw.flush().is_ok());
    }
}
| 32.440789 | 100 | 0.635774 |
28d1ce213f737e5924c411d29460578a73b83add | 1,194 | //! Module `highlight` provides semantic highlight services.
use super::response::*;
use crate::program::services::references as refs;
use crate::Program;
use tower_lsp::lsp_types::*;
impl Program {
    /// Returns semantic highlighting ranges relevant to a offset in a document.
    /// If the offset is over an identifier (variable), all references to that variable are returned.
    /// Otherwise, nothing is returned.
    pub fn get_semantic_highlights(&self, offset: usize) -> Option<Vec<ProgramHighlight>> {
        // The nice thing is that the references service already does most of the work to get
        // references, so we can just piggyback off that and translate types accordingly.
        // NOTE(review): `offset` is presumably a byte offset into the document
        // text -- confirm against `get_kinded_references` and its callers.
        let references = self.get_kinded_references(offset)?;
        let references = references
            .into_iter()
            .map(|rk| ProgramHighlight {
                span: *rk.span(),
                // Definitions map to `Write` highlights and usages to `Read`,
                // mirroring LSP `DocumentHighlightKind` semantics.
                kind: match rk {
                    refs::ReferenceKind::Definition(_) => DocumentHighlightKind::Write,
                    refs::ReferenceKind::Usage(_) => DocumentHighlightKind::Read,
                },
            })
            .collect();
        Some(references)
    }
}
| 38.516129 | 101 | 0.630653 |
38f43f745711c7685e22e7a9f5577cd484a49483 | 10,514 | use std::time::{Duration, UNIX_EPOCH};
use assign::assign;
use matches::assert_matches;
#[cfg(feature = "unstable-pre-spec")]
use ruma_events::{
key::verification::VerificationMethod, room::message::KeyVerificationRequestEventContent,
};
use ruma_events::{
room::{
message::{
AudioMessageEventContent, CustomEventContent, MessageEvent, MessageEventContent,
MessageType, Relation, TextMessageEventContent,
},
relationships::InReplyTo,
},
Unsigned,
};
#[cfg(feature = "unstable-pre-spec")]
use ruma_identifiers::DeviceIdBox;
use ruma_identifiers::{event_id, mxc_uri, room_id, user_id};
use ruma_serde::Raw;
use serde_json::{from_value as from_json_value, json, to_value as to_json_value};
/// Builds a `serde_json::Map<String, Value>` from `key => value` pairs;
/// a small map-literal helper for the tests below.
macro_rules! json_object {
    ( $($key:expr => $value:expr),* $(,)? ) => {
        {
            let mut _map = serde_json::Map::<String, serde_json::Value>::new();
            $(
                // `insert` returns the previous value on duplicate keys; the
                // tests never rely on it, so it is discarded.
                let _ = _map.insert($key, $value);
            )*
            _map
        }
    };
}
#[test]
fn serialization() {
let ev = MessageEvent {
content: MessageEventContent::new(MessageType::Audio(AudioMessageEventContent {
body: "test".into(),
info: None,
url: Some(mxc_uri!("mxc://example.org/ffed755USFFxlgbQYZGtryd")),
file: None,
})),
event_id: event_id!("$143273582443PhrSn:example.org"),
origin_server_ts: UNIX_EPOCH + Duration::from_millis(10_000),
room_id: room_id!("!testroomid:example.org"),
sender: user_id!("@user:example.org"),
unsigned: Unsigned::default(),
};
assert_eq!(
to_json_value(ev).unwrap(),
json!({
"type": "m.room.message",
"event_id": "$143273582443PhrSn:example.org",
"origin_server_ts": 10_000,
"room_id": "!testroomid:example.org",
"sender": "@user:example.org",
"content": {
"body": "test",
"msgtype": "m.audio",
"url": "mxc://example.org/ffed755USFFxlgbQYZGtryd",
}
})
);
}
#[test]
fn content_serialization() {
let message_event_content =
MessageEventContent::new(MessageType::Audio(AudioMessageEventContent {
body: "test".into(),
info: None,
url: Some(mxc_uri!("mxc://example.org/ffed755USFFxlgbQYZGtryd")),
file: None,
}));
assert_eq!(
to_json_value(&message_event_content).unwrap(),
json!({
"body": "test",
"msgtype": "m.audio",
"url": "mxc://example.org/ffed755USFFxlgbQYZGtryd"
})
);
}
#[test]
fn custom_content_serialization() {
let json_data = json_object! {
"custom_field".into() => json!("baba"),
"another_one".into() => json!("abab"),
};
let custom_event_content = MessageType::_Custom(CustomEventContent {
msgtype: "my_custom_msgtype".into(),
data: json_data,
});
assert_eq!(
to_json_value(&custom_event_content).unwrap(),
json!({
"msgtype": "my_custom_msgtype",
"custom_field": "baba",
"another_one": "abab",
})
);
}
#[test]
fn custom_content_deserialization() {
let json_data = json!({
"msgtype": "my_custom_msgtype",
"custom_field": "baba",
"another_one": "abab",
});
let expected_json_data = json_object! {
"custom_field".into() => json!("baba"),
"another_one".into() => json!("abab"),
};
assert_matches!(
from_json_value::<Raw<MessageType>>(json_data)
.unwrap()
.deserialize()
.unwrap(),
MessageType::_Custom(CustomEventContent {
msgtype,
data
}) if msgtype == "my_custom_msgtype"
&& data == expected_json_data
);
}
#[test]
fn formatted_body_serialization() {
    // `text_html` produces an `m.text` message carrying both the plain-text
    // body and an HTML `formatted_body` with the matrix custom-HTML format.
    let message_event_content =
        MessageEventContent::text_html("Hello, World!", "Hello, <em>World</em>!");
    assert_eq!(
        to_json_value(&message_event_content).unwrap(),
        json!({
            "body": "Hello, World!",
            "msgtype": "m.text",
            "format": "org.matrix.custom.html",
            "formatted_body": "Hello, <em>World</em>!",
        })
    );
}
#[test]
fn plain_text_content_serialization() {
let message_event_content =
MessageEventContent::text_plain("> <@test:example.com> test\n\ntest reply");
assert_eq!(
to_json_value(&message_event_content).unwrap(),
json!({
"body": "> <@test:example.com> test\n\ntest reply",
"msgtype": "m.text"
})
);
}
#[test]
fn relates_to_content_serialization() {
let message_event_content =
assign!(MessageEventContent::text_plain("> <@test:example.com> test\n\ntest reply"), {
relates_to: Some(Relation::Reply {
in_reply_to: InReplyTo { event_id: event_id!("$15827405538098VGFWH:example.com") },
}),
});
let json_data = json!({
"body": "> <@test:example.com> test\n\ntest reply",
"msgtype": "m.text",
"m.relates_to": {
"m.in_reply_to": {
"event_id": "$15827405538098VGFWH:example.com"
}
}
});
assert_eq!(to_json_value(&message_event_content).unwrap(), json_data);
}
#[test]
#[cfg(not(feature = "unstable-pre-spec"))]
fn edit_deserialization_061() {
let json_data = json!({
"body": "s/foo/bar",
"msgtype": "m.text",
"m.relates_to": {
"rel_type": "m.replace",
"event_id": event_id!("$1598361704261elfgc:localhost"),
},
"m.new_content": {
"body": "bar",
},
});
assert_matches!(
from_json_value::<MessageEventContent>(json_data).unwrap(),
MessageEventContent {
msgtype: MessageType::Text(TextMessageEventContent {
body,
formatted: None,
..
}),
relates_to: Some(Relation::_Custom(_)),
..
} if body == "s/foo/bar"
);
}
#[test]
#[cfg(feature = "unstable-pre-spec")]
fn edit_deserialization_future() {
use ruma_events::room::relationships::Replacement;
let ev_id = event_id!("$1598361704261elfgc:localhost");
let json_data = json!({
"body": "s/foo/bar",
"msgtype": "m.text",
"m.relates_to": {
"rel_type": "m.replace",
"event_id": ev_id,
},
"m.new_content": {
"body": "bar",
"msgtype": "m.text",
},
});
assert_matches!(
from_json_value::<MessageEventContent>(json_data).unwrap(),
MessageEventContent {
msgtype: MessageType::Text(TextMessageEventContent {
body,
formatted: None,
..
}),
relates_to: Some(Relation::Replacement(Replacement { event_id })),
new_content: Some(new_content),
..
} if body == "s/foo/bar"
&& event_id == ev_id
&& matches!(
&*new_content,
MessageEventContent {
msgtype: MessageType::Text(TextMessageEventContent {
body,
formatted: None,
..
}),
..
} if body == "bar"
)
);
}
#[test]
#[cfg(feature = "unstable-pre-spec")]
fn verification_request_deserialization() {
let user_id = user_id!("@example2:localhost");
let device_id: DeviceIdBox = "XOWLHHFSWM".into();
let json_data = json!({
"body": "@example:localhost is requesting to verify your key, ...",
"msgtype": "m.key.verification.request",
"to": user_id,
"from_device": device_id,
"methods": [
"m.sas.v1",
"m.qr_code.show.v1",
"m.reciprocate.v1"
]
});
assert_matches!(
from_json_value::<MessageEventContent>(json_data).unwrap(),
MessageEventContent {
msgtype: MessageType::VerificationRequest(KeyVerificationRequestEventContent {
body,
to,
from_device,
methods,
}),
..
} if body == "@example:localhost is requesting to verify your key, ..."
&& to == user_id
&& from_device == device_id
&& methods.contains(&VerificationMethod::MSasV1)
);
}
#[test]
#[cfg(feature = "unstable-pre-spec")]
fn verification_request_serialization() {
let user_id = user_id!("@example2:localhost");
let device_id: DeviceIdBox = "XOWLHHFSWM".into();
let body = "@example:localhost is requesting to verify your key, ...".to_owned();
let methods = vec![
VerificationMethod::MSasV1,
VerificationMethod::_Custom("m.qr_code.show.v1".to_owned()),
VerificationMethod::_Custom("m.reciprocate.v1".to_owned()),
];
let json_data = json!({
"body": body,
"msgtype": "m.key.verification.request",
"to": user_id,
"from_device": device_id,
"methods": methods
});
let content = MessageType::VerificationRequest(KeyVerificationRequestEventContent {
to: user_id,
from_device: device_id,
body,
methods,
});
assert_eq!(to_json_value(&content).unwrap(), json_data,);
}
#[test]
fn content_deserialization() {
let json_data = json!({
"body": "test",
"msgtype": "m.audio",
"url": "mxc://example.org/ffed755USFFxlgbQYZGtryd"
});
assert_matches!(
from_json_value::<Raw<MessageEventContent>>(json_data)
.unwrap()
.deserialize()
.unwrap(),
MessageEventContent {
msgtype: MessageType::Audio(AudioMessageEventContent {
body,
info: None,
url: Some(url),
file: None,
}),
..
} if body == "test" && url.to_string() == "mxc://example.org/ffed755USFFxlgbQYZGtryd"
);
}
#[test]
fn content_deserialization_failure() {
    // An `m.location` message requires a `geo_uri`; supplying an audio-style
    // `url` instead must fail to deserialize.
    let json_data = json!({
        "body": "test",
        "msgtype": "m.location",
        "url": "http://example.com/audio.mp3"
    });
    let raw = from_json_value::<Raw<MessageEventContent>>(json_data).unwrap();
    assert!(raw.deserialize().is_err());
}
| 28.964187 | 100 | 0.545653 |
8fc4ac93daa68115d0834b97d9a8ddcbc6dccfb6 | 4,282 | /// An iterator that knows its exact length.
///
/// Many [`Iterator`]s don't know how many times they will iterate, but some do.
/// If an iterator knows how many times it can iterate, providing access to
/// that information can be useful. For example, if you want to iterate
/// backwards, a good start is to know where the end is.
///
/// When implementing an `ExactSizeIterator`, you must also implement
/// [`Iterator`]. When doing so, the implementation of [`size_hint`] *must*
/// return the exact size of the iterator.
///
/// [`Iterator`]: trait.Iterator.html
/// [`size_hint`]: trait.Iterator.html#method.size_hint
///
/// The [`len`] method has a default implementation, so you usually shouldn't
/// implement it. However, you may be able to provide a more performant
/// implementation than the default, so overriding it in this case makes sense.
///
/// [`len`]: #method.len
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // a finite range knows exactly how many times it will iterate
/// let five = 0..5;
///
/// assert_eq!(5, five.len());
/// ```
///
/// In the [module level docs][moddocs], we implemented an [`Iterator`],
/// `Counter`. Let's implement `ExactSizeIterator` for it as well:
///
/// [moddocs]: index.html
///
/// ```
/// # struct Counter {
/// # count: usize,
/// # }
/// # impl Counter {
/// # fn new() -> Counter {
/// # Counter { count: 0 }
/// # }
/// # }
/// # impl Iterator for Counter {
/// # type Item = usize;
/// # fn next(&mut self) -> Option<Self::Item> {
/// # self.count += 1;
/// # if self.count < 6 {
/// # Some(self.count)
/// # } else {
/// # None
/// # }
/// # }
/// # }
/// impl ExactSizeIterator for Counter {
/// // We can easily calculate the remaining number of iterations.
/// fn len(&self) -> usize {
/// 5 - self.count
/// }
/// }
///
/// // And now we can use it!
///
/// let counter = Counter::new();
///
/// assert_eq!(5, counter.len());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait ExactSizeIterator: Iterator {
/// Returns the exact number of times the iterator will iterate.
///
/// This method has a default implementation, so you usually should not
/// implement it directly. However, if you can provide a more efficient
/// implementation, you can do so. See the [trait-level] docs for an
/// example.
///
/// This function has the same safety guarantees as the [`size_hint`]
/// function.
///
/// [trait-level]: trait.ExactSizeIterator.html
/// [`size_hint`]: trait.Iterator.html#method.size_hint
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // a finite range knows exactly how many times it will iterate
/// let five = 0..5;
///
/// assert_eq!(5, five.len());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn len(&self) -> usize {
let (lower, upper) = self.size_hint();
// Note: This assertion is overly defensive, but it checks the invariant
// guaranteed by the trait. If this trait were rust-internal,
// we could use debug_assert!; assert_eq! will check all Rust user
// implementations too.
assert_eq!(upper, Some(lower));
lower
}
/// Returns `true` if the iterator is empty.
///
/// This method has a default implementation using `self.len()`, so you
/// don't need to implement it yourself.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// #![feature(exact_size_is_empty)]
///
/// let mut one_element = std::iter::once(0);
/// assert!(!one_element.is_empty());
///
/// assert_eq!(one_element.next(), Some(0));
/// assert!(one_element.is_empty());
///
/// assert_eq!(one_element.next(), None);
/// ```
#[inline]
#[unstable(feature = "exact_size_is_empty", issue = "35428")]
fn is_empty(&self) -> bool {
self.len() == 0
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for &mut I {
    // Forward both methods to the underlying iterator so that a `&mut I`
    // preserves the exact-length guarantee of `I`.
    fn len(&self) -> usize {
        (**self).len()
    }
    fn is_empty(&self) -> bool {
        (**self).is_empty()
    }
}
| 29.736111 | 80 | 0.569127 |
1a4ff88e59b442253cf18c7311782cb7ae4f0460 | 3,604 | // xdr:pic
use super::super::super::Anchor;
use super::NonVisualPictureProperties;
use super::BlipFill;
use super::ShapeProperties;
use writer::driver::*;
use quick_xml::events::{Event, BytesStart};
use quick_xml::Writer;
use quick_xml::Reader;
use std::io::Cursor;
use tempdir::TempDir;
/// In-memory model of a drawing `<xdr:pic>` element (a picture embedded in a
/// worksheet drawing), together with its XML (de)serialization helpers.
#[derive(Default, Debug)]
pub struct Picture {
    // Placement of the picture on the sheet.
    anchor: Anchor,
    // <xdr:nvPicPr> non-visual properties.
    non_visual_picture_properties: NonVisualPictureProperties,
    // <xdr:blipFill> image data / fill description.
    blip_fill: BlipFill,
    // <xdr:spPr> shape properties.
    shape_properties: ShapeProperties,
}
impl Picture {
    pub fn get_anchor(&self) -> &Anchor {
        &self.anchor
    }
    pub fn get_anchor_mut(&mut self) -> &mut Anchor {
        &mut self.anchor
    }
    pub fn set_anchor(&mut self, value: Anchor) {
        self.anchor = value;
    }
    pub fn get_non_visual_picture_properties(&self) -> &NonVisualPictureProperties {
        &self.non_visual_picture_properties
    }
    pub fn get_non_visual_picture_properties_mut(&mut self) -> &mut NonVisualPictureProperties {
        &mut self.non_visual_picture_properties
    }
    pub fn set_non_visual_picture_properties(&mut self, value: NonVisualPictureProperties) {
        self.non_visual_picture_properties = value;
    }
    pub fn get_blip_fill(&self) -> &BlipFill {
        &self.blip_fill
    }
    pub fn get_blip_fill_mut(&mut self) -> &mut BlipFill {
        &mut self.blip_fill
    }
    pub fn set_blip_fill(&mut self, value: BlipFill) {
        self.blip_fill = value;
    }
    pub fn get_shape_properties(&self) -> &ShapeProperties {
        &self.shape_properties
    }
    pub fn get_shape_properties_mut(&mut self) -> &mut ShapeProperties {
        &mut self.shape_properties
    }
    pub fn set_shape_properties(&mut self, value: ShapeProperties) {
        self.shape_properties = value;
    }
    /// Populates `self` from the XML events that follow an `<xdr:pic>` start
    /// tag, consuming events up to and including the matching end tag.
    /// Panics on malformed XML or EOF before the end tag (matching the
    /// error-handling style of the sibling readers).
    pub(crate) fn set_attributes(
        &mut self,
        reader: &mut Reader<std::io::BufReader<std::fs::File>>,
        _e: &BytesStart,
        dir: &TempDir,
        target: &str,
    ) {
        let mut buf = Vec::new();
        loop {
            match reader.read_event(&mut buf) {
                Ok(Event::Start(ref e)) => {
                    // Fix: the former `&mut child.set_attributes(..)` statements
                    // borrowed the `()` result to no effect (compiler warnings);
                    // plain calls suffice.
                    match e.name() {
                        b"xdr:nvPicPr" => {
                            self.non_visual_picture_properties.set_attributes(reader, e);
                        }
                        b"xdr:blipFill" => {
                            self.blip_fill.set_attributes(reader, e, dir, target);
                        }
                        b"xdr:spPr" => {
                            self.shape_properties.set_attributes(reader, e);
                        }
                        _ => (),
                    }
                }
                Ok(Event::End(ref e)) => match e.name() {
                    b"xdr:pic" => return,
                    _ => (),
                },
                Ok(Event::Eof) => panic!("Error not find {} end element", "xdr:pic"),
                Err(e) => panic!("Error at position {}: {:?}", reader.buffer_position(), e),
                _ => (),
            }
            buf.clear();
        }
    }
    /// Serializes this picture as an `<xdr:pic>` element. `r_id` is the
    /// relationship id used for the image reference inside `<xdr:blipFill>`.
    pub(crate) fn write_to(&self, writer: &mut Writer<Cursor<Vec<u8>>>, r_id: &i32) {
        // xdr:pic
        write_start_tag(writer, "xdr:pic", vec![], false);
        // xdr:nvPicPr
        self.non_visual_picture_properties.write_to(writer);
        // xdr:blipFill
        self.blip_fill.write_to(writer, r_id);
        // xdr:spPr
        self.shape_properties.write_to(writer);
        write_end_tag(writer, "xdr:pic");
    }
}
| 29.064516 | 96 | 0.543563 |
bfe3e68e796934f1ac8dfc5434f639895a901563 | 2,065 | #[doc = "Register `IRQ_EDGE` reader"]
pub struct R(crate::R<IRQ_EDGE_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<IRQ_EDGE_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<IRQ_EDGE_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<IRQ_EDGE_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `IRQ_EDGE` writer"]
pub struct W(crate::W<IRQ_EDGE_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<IRQ_EDGE_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<IRQ_EDGE_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<IRQ_EDGE_SPEC>) -> Self {
W(writer)
}
}
impl W {
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Interrupt Both Edge Register (1:Both Edges, 0:Single Edge)\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [irq_edge](index.html) module"]
pub struct IRQ_EDGE_SPEC;
impl crate::RegisterSpec for IRQ_EDGE_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [irq_edge::R](R) reader structure"]
impl crate::Readable for IRQ_EDGE_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [irq_edge::W](W) writer structure"]
impl crate::Writable for IRQ_EDGE_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets IRQ_EDGE to value 0"]
impl crate::Resettable for IRQ_EDGE_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
| 31.769231 | 447 | 0.625182 |
18b9663c2bb382aed2b34fb7e34ded2e78683e74 | 3,011 | #![no_std]
#![feature(asm)]
#![feature(lang_items)]
#![feature(const_fn)]
#![feature(unique, const_unique_new)]
#![feature(slice_rotate)]
#![feature(try_from)]
#![feature(nll)]
#![feature(inclusive_range_syntax)]
#![feature(type_ascription)]
#![feature(ptr_internals)]
#![feature(abi_x86_interrupt)]
extern crate rlibc;
extern crate volatile;
extern crate spin;
extern crate x86_64;
extern crate array_init; // Used as a workaround until const-generics arrives
#[macro_use]
extern crate bitflags;
#[macro_use]
extern crate lazy_static;
use drivers::keyboard::{Keyboard, KeyEventType, Ps2Keyboard};
use drivers::keyboard::keymap;
use drivers::ps2;
use terminal::TerminalOutput;
mod lang;
#[macro_use]
mod log;
#[macro_use]
mod util;
#[macro_use]
mod color;
mod io;
mod interrupts;
#[macro_use]
mod terminal;
mod drivers;
/// Kernel main function
#[no_mangle]
pub extern fn kmain() -> ! {
    // Install interrupt handling before anything else can fault.
    interrupts::init();
    terminal::STDOUT.write().clear().expect("Screen clear failed");
    print_flower().expect("Flower print failed");
    terminal::STDOUT.write().set_color(color!(Green on Black))
        .expect("Color should be supported");
    // Print boot message
    println!("Flower kernel boot!");
    println!("-------------------\n");
    // Reset colors
    terminal::STDOUT.write().set_color(color!(White on Black))
        .expect("Color should be supported");
    // Bring up the PS/2 controller; failures are logged but not fatal here.
    let mut controller = ps2::CONTROLLER.lock();
    match controller.initialize() {
        Ok(_) => info!("ps2c: init successful"),
        Err(error) => error!("ps2c: {:?}", error),
    }
    let keyboard_device = controller.device(ps2::DevicePort::Keyboard);
    let mut keyboard = Ps2Keyboard::new(keyboard_device);
    if let Ok(_) = keyboard.enable() {
        info!("kbd: successfully enabled");
        // Echo loop: poll the keyboard forever, printing characters and
        // handling backspace. Key releases (`Break`) are ignored.
        loop {
            if let Ok(Some(event)) = keyboard.read_event() {
                if event.event_type != KeyEventType::Break {
                    if event.keycode == keymap::codes::BACKSPACE {
                        // Ignore error
                        let _ = terminal::STDOUT.write().backspace();
                    } else if let Some(character) = event.char {
                        print!("{}", character)
                    }
                }
            }
        }
    } else {
        error!("kbd: enable unsuccessful");
    }
    // Only reached when the keyboard could not be enabled.
    halt()
}
/// Draws the ASCII-art flower and its stem in color, then restores the cursor
/// to where it was so subsequent boot messages continue from there.
fn print_flower() -> Result<(), terminal::TerminalOutputError<()>> {
    // Art is embedded into the binary at compile time.
    const FLOWER: &'static str = include_str!("resources/art/flower.txt");
    const FLOWER_STEM: &'static str = include_str!("resources/art/flower_stem.txt");
    let mut stdout = terminal::STDOUT.write();
    let old = stdout.cursor_pos();
    stdout.write_string_colored(FLOWER, color!(LightBlue on Black))?;
    stdout.write_string_colored(FLOWER_STEM, color!(Green on Black))?;
    stdout.set_cursor_pos(old)
}
/// Stops the CPU permanently: interrupts are disabled and the processor is
/// repeatedly halted.
fn halt() -> ! {
    unsafe {
        // Disable interrupts
        asm!("cli");
        // Halt forever...
        // NOTE(review): `hlt` is looped rather than executed once -- presumably
        // because execution can resume after a non-maskable interrupt even
        // with `cli` in effect; confirm.
        loop {
            asm!("hlt");
        }
    }
}
| 25.735043 | 84 | 0.611425 |
8fa4e81c88955ae3e6ce4756eec74e0aae95e9dc | 860 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:noexporttypelib.rs
extern crate noexporttypelib;
fn main() {
// Here, the type returned by foo() is not exported.
// This used to cause internal errors when serializing
// because the def_id associated with the type was
// not convertible to a path.
let x: isize = noexporttypelib::foo();
//~^ ERROR expected `isize`, found `core::option::Option<isize>`
}
| 37.391304 | 68 | 0.718605 |
61e27560e03468bc50f8d20efea134b10d4dc980 | 1,551 | // File: common/vec2.rs
// Author: Jacob Guenther
// Date: December 2020
/*
Copyright 2020 Jacob Guenther
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/// A generic two-component vector with public `x`/`y` fields.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Eq, Hash)]
pub struct Vec2<T> {
    pub x: T,
    pub y: T,
}
impl<T> Vec2<T> {
    /// Builds a vector from its two components.
    pub fn new(x: T, y: T) -> Self {
        Vec2 { x, y }
    }
    /// Exchanges the `x` and `y` components in place.
    pub fn swap(&mut self) {
        use std::mem;
        mem::swap(&mut self.x, &mut self.y);
    }
}
impl Default for Vec2<i32> {
fn default() -> Self {
Self { x: 0, y: 0 }
}
}
impl Default for Vec2<usize> {
fn default() -> Self {
Self { x: 0, y: 0 }
}
}
| 31.653061 | 80 | 0.729852 |
5b091f80df6ab372adc60218b6cf06b43a690b92 | 1,903 | pub use arguments::get_matches;
pub use parser::read_commands;
use crate::commands::Command;
pub mod arguments;
pub mod parser;
pub const NAME_FILE_IN: &str = "INPUT_PATH";
pub const NAME_FILE_OUT: &str = "OUTPUT_PATH";
const ARG_BLUR: &str = "blur";
const ARG_BRIGHTEN: &str = "brighten";
const ARG_CONTRAST: &str = "contrast";
const ARG_COMBINE_TL: &str = "combine_tl";
const ARG_COMBINE_TR: &str = "combine_tr";
const ARG_COMBINE_BL: &str = "combine_bl";
const ARG_COMBINE_BR: &str = "combine_br";
const ARG_CROP_BOX: &str = "crop_box";
const ARG_CROP_RATIO: &str = "crop_ratio";
const ARG_EXIF: &str = "exif";
const ARG_FLIP_HORIZONTAL: &str = "flip_horizontal";
const ARG_FLIP_VERTICAL: &str = "flip_vertical";
const ARG_HUEROTATE: &str = "huerotate";
const ARG_INVERT: &str = "invert";
const ARG_RESIZE: &str = "resize";
const ARG_RESIZE_N: &str = "resize_n";
const ARG_RESIZE_T: &str = "resize_t";
const ARG_RESIZE_C: &str = "resize_c";
const ARG_RESIZE_G: &str = "resize_g";
const ARG_RESIZE_L: &str = "resize_l";
const ARG_ROTATE90: &str = "rotate90";
const ARG_ROTATE180: &str = "rotate180";
const ARG_ROTATE270: &str = "rotate270";
const ARG_TEXT_TL: &str = "text_tl";
const ARG_TEXT_TR: &str = "text_tr";
const ARG_TEXT_BL: &str = "text_bl";
const ARG_TEXT_BR: &str = "text_br";
const ARG_UNSHARPEN: &str = "unsharpen";
const ARG_PRESET: &str = "preset";
const VAL_COMBINE: [&str; 3] = ["IMAGE_PATH", "x_offset", "y_offset"];
const VAL_RESIZE: [&str; 3] = ["nwidth", "nheight", "exact"];
const VAL_TEXT: [&str; 3] = ["text", "x_offset", "y_offset"];
const PRESETS: [&str; 3] = ["app_copyright", "full_hd", "background"];
/// Representation of the command-list as a struct
pub struct Commands {
    /// The ordered list of `Command` implementors that will be applied, in
    /// sequence, to the supplied image(s).
    pub(crate) commands: Vec<Box<dyn Command>>,
}
| 35.90566 | 139 | 0.712034 |
d976df82577ead214fc13499f280b33d1b658449 | 18,545 | //! Intrinsics and other functions that the miri engine executes without
//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
//! and miri.
use std::convert::TryFrom;
use rustc_hir::def_id::DefId;
use rustc_middle::mir::{
self,
interpret::{ConstValue, GlobalId, InterpResult, Scalar},
BinOp,
};
use rustc_middle::ty;
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::TyCtxt;
use rustc_span::symbol::{sym, Symbol};
use rustc_target::abi::{Abi, LayoutOf as _, Primitive, Size};
use super::{ImmTy, InterpCx, Machine, OpTy, PlaceTy};
mod caller_location;
mod type_name;
/// Evaluates a bit-manipulation intrinsic (`ctpop`, `ctlz`, `cttz`, `bswap`,
/// `bitreverse`) on the raw `bits` of an integer whose primitive type is
/// `kind`, returning the result at that integer's width.
fn numeric_intrinsic<'tcx, Tag>(
    name: Symbol,
    bits: u128,
    kind: Primitive,
) -> InterpResult<'tcx, Scalar<Tag>> {
    let size = match kind {
        Primitive::Int(integer, _) => integer.size(),
        _ => bug!("invalid `{}` argument: {:?}", name, bits),
    };
    // `bits` sits zero-extended in a u128; `extra` is the number of padding
    // bits above the operand's real width.
    let extra = 128 - u128::from(size.bits());
    let bits_out = match name {
        sym::ctpop => u128::from(bits.count_ones()),
        // For the zero-count and byte/bit-reversal ops, shift the operand to
        // the top of the u128 (or subtract the padding) so the result refers
        // to the operand's own width, not the full 128 bits.
        sym::ctlz => u128::from(bits.leading_zeros()) - extra,
        sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra,
        sym::bswap => (bits << extra).swap_bytes(),
        sym::bitreverse => (bits << extra).reverse_bits(),
        _ => bug!("not a numeric intrinsic: {}", name),
    };
    Ok(Scalar::from_uint(bits_out, size))
}
/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
crate fn eval_nullary_intrinsic<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    def_id: DefId,
    substs: SubstsRef<'tcx>,
) -> InterpResult<'tcx, ConstValue<'tcx>> {
    // Every supported nullary intrinsic takes exactly one type parameter.
    let tp_ty = substs.type_at(0);
    let name = tcx.item_name(def_id);
    Ok(match name {
        sym::type_name => {
            // The name is materialized as a byte allocation and returned as a
            // slice over the whole of it.
            let alloc = type_name::alloc_type_name(tcx, tp_ty);
            ConstValue::Slice { data: alloc, start: 0, end: alloc.len() }
        }
        sym::needs_drop => ConstValue::from_bool(tp_ty.needs_drop(tcx, param_env)),
        sym::size_of | sym::min_align_of | sym::pref_align_of => {
            // Layout computation can fail (e.g. for too-generic types); that
            // is surfaced as an interpreter error rather than an ICE.
            let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
            let n = match name {
                sym::pref_align_of => layout.align.pref.bytes(),
                sym::min_align_of => layout.align.abi.bytes(),
                sym::size_of => layout.size.bytes(),
                _ => bug!(),
            };
            ConstValue::from_machine_usize(n, &tcx)
        }
        sym::type_id => ConstValue::from_u64(tcx.type_id_hash(tp_ty)),
        other => bug!("`{}` is not a zero arg intrinsic", other),
    })
}
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Emulates a call to `instance` as an intrinsic: `args` are the operands,
    /// and on success the result is written to `dest` and execution continues
    /// at `ret`. Returns `true` if emulation happened; `Ok(false)` means the
    /// intrinsic is not handled here and the caller must deal with it.
    pub fn emulate_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, M::PointerTag>],
        ret: Option<(PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
    ) -> InterpResult<'tcx, bool> {
        let substs = instance.substs;
        let intrinsic_name = self.tcx.item_name(instance.def_id());
        // First handle intrinsics without return place.
        let (dest, ret) = match ret {
            None => match intrinsic_name {
                sym::transmute => throw_ub_format!("transmuting to uninhabited type"),
                sym::abort => M::abort(self)?,
                // Unsupported diverging intrinsic.
                _ => return Ok(false),
            },
            Some(p) => p,
        };
        // Keep the patterns in this match ordered the same as the list in
        // `src/librustc_middle/ty/constness.rs`
        match intrinsic_name {
            sym::caller_location => {
                let span = self.find_closest_untracked_caller_location();
                let location = self.alloc_caller_location_for_span(span);
                self.write_scalar(location.ptr, dest)?;
            }
            // Nullary intrinsics: the value only depends on the type argument,
            // so it is computed by const-evaluating the intrinsic instance itself.
            sym::min_align_of
            | sym::pref_align_of
            | sym::needs_drop
            | sym::size_of
            | sym::type_id
            | sym::type_name => {
                let gid = GlobalId { instance, promoted: None };
                // Pick the result type the const-eval value will be read at.
                let ty = match intrinsic_name {
                    sym::min_align_of | sym::pref_align_of | sym::size_of => self.tcx.types.usize,
                    sym::needs_drop => self.tcx.types.bool,
                    sym::type_id => self.tcx.types.u64,
                    sym::type_name => self.tcx.mk_static_str(),
                    _ => bug!("already checked for nullary intrinsics"),
                };
                let val = self.const_eval(gid, ty)?;
                self.copy_op(val, dest)?;
            }
            // Single-operand bit-level intrinsics; the actual computation is
            // done by the free function `numeric_intrinsic` above.
            sym::ctpop
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctlz
            | sym::ctlz_nonzero
            | sym::bswap
            | sym::bitreverse => {
                let ty = substs.type_at(0);
                let layout_of = self.layout_of(ty)?;
                let val = self.read_scalar(args[0])?.not_undef()?;
                let bits = self.force_bits(val, layout_of.size)?;
                let kind = match layout_of.abi {
                    Abi::Scalar(ref scalar) => scalar.value,
                    _ => bug!("{} called on invalid type {:?}", intrinsic_name, ty),
                };
                // The `_nonzero` variants are UB on a zero operand; normalize
                // them to the plain variant after recording that fact.
                let (nonzero, intrinsic_name) = match intrinsic_name {
                    sym::cttz_nonzero => (true, sym::cttz),
                    sym::ctlz_nonzero => (true, sym::ctlz),
                    other => (false, other),
                };
                if nonzero && bits == 0 {
                    throw_ub_format!("`{}_nonzero` called on 0", intrinsic_name);
                }
                let out_val = numeric_intrinsic(intrinsic_name, bits, kind)?;
                self.write_scalar(out_val, dest)?;
            }
            sym::wrapping_add
            | sym::wrapping_sub
            | sym::wrapping_mul
            | sym::add_with_overflow
            | sym::sub_with_overflow
            | sym::mul_with_overflow => {
                let lhs = self.read_immediate(args[0])?;
                let rhs = self.read_immediate(args[1])?;
                let (bin_op, ignore_overflow) = match intrinsic_name {
                    sym::wrapping_add => (BinOp::Add, true),
                    sym::wrapping_sub => (BinOp::Sub, true),
                    sym::wrapping_mul => (BinOp::Mul, true),
                    sym::add_with_overflow => (BinOp::Add, false),
                    sym::sub_with_overflow => (BinOp::Sub, false),
                    sym::mul_with_overflow => (BinOp::Mul, false),
                    _ => bug!("Already checked for int ops"),
                };
                if ignore_overflow {
                    self.binop_ignore_overflow(bin_op, lhs, rhs, dest)?;
                } else {
                    // The `_with_overflow` variants write a `(result, overflowed)` pair.
                    self.binop_with_overflow(bin_op, lhs, rhs, dest)?;
                }
            }
            sym::saturating_add | sym::saturating_sub => {
                let l = self.read_immediate(args[0])?;
                let r = self.read_immediate(args[1])?;
                let is_add = intrinsic_name == sym::saturating_add;
                let (val, overflowed, _ty) =
                    self.overflowing_binary_op(if is_add { BinOp::Add } else { BinOp::Sub }, l, r)?;
                // On overflow, replace the wrapped result with the saturation bound.
                let val = if overflowed {
                    let num_bits = l.layout.size.bits();
                    if l.layout.abi.is_signed() {
                        // For signed ints the saturated value depends on the sign of the first
                        // term since the sign of the second term can be inferred from this and
                        // the fact that the operation has overflowed (if either is 0 no
                        // overflow can occur)
                        let first_term: u128 = self.force_bits(l.to_scalar()?, l.layout.size)?;
                        let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
                        if first_term_positive {
                            // Negative overflow not possible since the positive first term
                            // can only increase an (in range) negative term for addition
                            // or corresponding negated positive term for subtraction
                            Scalar::from_uint(
                                (1u128 << (num_bits - 1)) - 1, // max positive
                                Size::from_bits(num_bits),
                            )
                        } else {
                            // Positive overflow not possible for similar reason
                            // max negative
                            Scalar::from_uint(1u128 << (num_bits - 1), Size::from_bits(num_bits))
                        }
                    } else {
                        // unsigned
                        if is_add {
                            // max unsigned
                            Scalar::from_uint(
                                u128::MAX >> (128 - num_bits),
                                Size::from_bits(num_bits),
                            )
                        } else {
                            // underflow to 0
                            Scalar::from_uint(0u128, Size::from_bits(num_bits))
                        }
                    }
                } else {
                    val
                };
                self.write_scalar(val, dest)?;
            }
            sym::discriminant_value => {
                // Reads the discriminant of the value the pointer argument points to.
                let place = self.deref_operand(args[0])?;
                let discr_val = self.read_discriminant(place.into())?.0;
                self.write_scalar(Scalar::from_u64(u64::try_from(discr_val).unwrap()), dest)?;
            }
            sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::unchecked_div
            | sym::unchecked_rem => {
                let l = self.read_immediate(args[0])?;
                let r = self.read_immediate(args[1])?;
                let bin_op = match intrinsic_name {
                    sym::unchecked_shl => BinOp::Shl,
                    sym::unchecked_shr => BinOp::Shr,
                    sym::unchecked_add => BinOp::Add,
                    sym::unchecked_sub => BinOp::Sub,
                    sym::unchecked_mul => BinOp::Mul,
                    sym::unchecked_div => BinOp::Div,
                    sym::unchecked_rem => BinOp::Rem,
                    _ => bug!("Already checked for int ops"),
                };
                let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, l, r)?;
                // `unchecked_*` overflow is UB, so raise an error instead of wrapping.
                if overflowed {
                    let layout = self.layout_of(substs.type_at(0))?;
                    let r_val = self.force_bits(r.to_scalar()?, layout.size)?;
                    if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
                        throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
                    } else {
                        throw_ub_format!("overflow executing `{}`", intrinsic_name);
                    }
                }
                self.write_scalar(val, dest)?;
            }
            sym::rotate_left | sym::rotate_right => {
                // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
                // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
                let layout = self.layout_of(substs.type_at(0))?;
                let val = self.read_scalar(args[0])?.not_undef()?;
                let val_bits = self.force_bits(val, layout.size)?;
                let raw_shift = self.read_scalar(args[1])?.not_undef()?;
                let raw_shift_bits = self.force_bits(raw_shift, layout.size)?;
                let width_bits = u128::from(layout.size.bits());
                // Reduce the shift modulo the bit width so over-rotation wraps.
                let shift_bits = raw_shift_bits % width_bits;
                let inv_shift_bits = (width_bits - shift_bits) % width_bits;
                let result_bits = if intrinsic_name == sym::rotate_left {
                    (val_bits << shift_bits) | (val_bits >> inv_shift_bits)
                } else {
                    (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
                };
                let truncated_bits = self.truncate(result_bits, layout);
                let result = Scalar::from_uint(truncated_bits, layout.size);
                self.write_scalar(result, dest)?;
            }
            sym::ptr_offset_from => {
                let a = self.read_immediate(args[0])?.to_scalar()?;
                let b = self.read_immediate(args[1])?.to_scalar()?;
                // Special case: if both scalars are *equal integers*
                // and not NULL, we pretend there is an allocation of size 0 right there,
                // and their offset is 0. (There's never a valid object at NULL, making it an
                // exception from the exception.)
                // This is the dual to the special exception for offset-by-0
                // in the inbounds pointer offset operation (see the Miri code, `src/operator.rs`).
                //
                // Control flow is weird because we cannot early-return (to reach the
                // `go_to_block` at the end).
                let done = if a.is_bits() && b.is_bits() {
                    let a = a.to_machine_usize(self)?;
                    let b = b.to_machine_usize(self)?;
                    if a == b && a != 0 {
                        self.write_scalar(Scalar::from_machine_isize(0, self), dest)?;
                        true
                    } else {
                        false
                    }
                } else {
                    false
                };
                if !done {
                    // General case: we need two pointers.
                    let a = self.force_ptr(a)?;
                    let b = self.force_ptr(b)?;
                    if a.alloc_id != b.alloc_id {
                        throw_ub_format!(
                            "ptr_offset_from cannot compute offset of pointers into different \
                            allocations.",
                        );
                    }
                    let usize_layout = self.layout_of(self.tcx.types.usize)?;
                    let isize_layout = self.layout_of(self.tcx.types.isize)?;
                    let a_offset = ImmTy::from_uint(a.offset.bytes(), usize_layout);
                    let b_offset = ImmTy::from_uint(b.offset.bytes(), usize_layout);
                    // Byte distance, then divide exactly by the pointee size to
                    // get the element count; `exact_div` errors if it doesn't divide.
                    let (val, _overflowed, _ty) =
                        self.overflowing_binary_op(BinOp::Sub, a_offset, b_offset)?;
                    let pointee_layout = self.layout_of(substs.type_at(0))?;
                    let val = ImmTy::from_scalar(val, isize_layout);
                    let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout);
                    self.exact_div(val, size, dest)?;
                }
            }
            sym::transmute => {
                self.copy_op_transmute(args[0], dest)?;
            }
            sym::simd_insert => {
                let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
                let elem = args[2];
                let input = args[0];
                let (len, e_ty) = input.layout.ty.simd_size_and_type(self.tcx.tcx);
                assert!(
                    index < len,
                    "Index `{}` must be in bounds of vector type `{}`: `[0, {})`",
                    index,
                    e_ty,
                    len
                );
                assert_eq!(
                    input.layout, dest.layout,
                    "Return type `{}` must match vector type `{}`",
                    dest.layout.ty, input.layout.ty
                );
                assert_eq!(
                    elem.layout.ty, e_ty,
                    "Scalar element type `{}` must match vector element type `{}`",
                    elem.layout.ty, e_ty
                );
                // Copy the input vector element-wise, substituting `elem` at `index`.
                for i in 0..len {
                    let place = self.place_index(dest, i)?;
                    let value = if i == index { elem } else { self.operand_index(input, i)? };
                    self.copy_op(value, place)?;
                }
            }
            sym::simd_extract => {
                let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
                let (len, e_ty) = args[0].layout.ty.simd_size_and_type(self.tcx.tcx);
                assert!(
                    index < len,
                    "index `{}` is out-of-bounds of vector type `{}` with length `{}`",
                    index,
                    e_ty,
                    len
                );
                assert_eq!(
                    e_ty, dest.layout.ty,
                    "Return type `{}` must match vector element type `{}`",
                    dest.layout.ty, e_ty
                );
                self.copy_op(self.operand_index(args[0], index)?, dest)?;
            }
            _ => return Ok(false),
        }
        self.dump_place(*dest);
        self.go_to_block(ret);
        Ok(true)
    }
    /// Performs an exact division `a / b` into `dest`, raising a UB error in
    /// the cases the `exact_div` intrinsic leaves undefined (see the comment
    /// in the body).
    pub fn exact_div(
        &mut self,
        a: ImmTy<'tcx, M::PointerTag>,
        b: ImmTy<'tcx, M::PointerTag>,
        dest: PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // Performs an exact division, resulting in undefined behavior where
        // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
        // First, check x % y != 0 (or if that computation overflows).
        let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, a, b)?;
        if overflow || res.assert_bits(a.layout.size) != 0 {
            // Then, check if `b` is -1, which is the "MIN / -1" case.
            let minus1 = Scalar::from_int(-1, dest.layout.size);
            let b_scalar = b.to_scalar().unwrap();
            if b_scalar == minus1 {
                throw_ub_format!("exact_div: result of dividing MIN by -1 cannot be represented")
            } else {
                throw_ub_format!("exact_div: {} cannot be divided by {} without remainder", a, b,)
            }
        }
        // `Rem` says this is all right, so we can let `Div` do its job.
        self.binop_ignore_overflow(BinOp::Div, a, b, dest)
    }
}
| 45.565111 | 100 | 0.487894 |
61a51b6726114de357abd4d6a5bd069997eddeca | 730 | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn clamp<T:Ord + Signed>(x: T, mn: T, mx: T) -> T {
cond!(
(x > mx) { mx }
(x < mn) { mn }
_ { x }
)
}
fn main() {
assert_eq!(clamp(1, 2, 4), 2);
assert_eq!(clamp(8, 2, 4), 4);
assert_eq!(clamp(3, 2, 4), 3);
}
| 30.416667 | 68 | 0.616438 |
0e8d460fb672673c251c9339f201a98c4af36ed0 | 42,256 | #![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
// NOTE: generated code (AutoRust); do not hand-edit the request logic.
pub mod operations {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// Lists the REST operations exposed by the `Microsoft.AVS` resource provider.
    pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<OperationList, list::Error> {
        let client = &operation_config.client;
        let uri_str = &format!("{}/providers/Microsoft.AVS/operations", &operation_config.base_path,);
        let mut req_builder = client.get(uri_str);
        // Only attach a bearer token when a credential was configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(list::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: OperationList = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                Ok(rsp_value)
            }
            // Any non-200 status is deserialized as a service `ApiError`.
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: ApiError = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                list::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error types for the [`list`] operation.
    pub mod list {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ApiError },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
// NOTE: generated code (AutoRust); do not hand-edit the request logic.
pub mod locations {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// Checks trial availability for the given subscription and location
    /// (POST `.../locations/{location}/checkTrialAvailability`).
    pub async fn check_trial_availability(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        location: &str,
    ) -> std::result::Result<Trial, check_trial_availability::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.AVS/locations/{}/checkTrialAvailability",
            &operation_config.base_path, subscription_id, location
        );
        let mut req_builder = client.post(uri_str);
        // Only attach a bearer token when a credential was configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(check_trial_availability::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(check_trial_availability::BuildRequestError)?;
        let rsp = client.execute(req).await.context(check_trial_availability::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(check_trial_availability::ResponseBytesError)?;
                let rsp_value: Trial = serde_json::from_slice(&body).context(check_trial_availability::DeserializeError { body })?;
                Ok(rsp_value)
            }
            // Any non-200 status is deserialized as a service `ApiError`.
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(check_trial_availability::ResponseBytesError)?;
                let rsp_value: ApiError = serde_json::from_slice(&body).context(check_trial_availability::DeserializeError { body })?;
                check_trial_availability::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error types for the [`check_trial_availability`] operation.
    pub mod check_trial_availability {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ApiError },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Checks quota availability for the given subscription and location
    /// (POST `.../locations/{location}/checkQuotaAvailability`).
    pub async fn check_quota_availability(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        location: &str,
    ) -> std::result::Result<Quota, check_quota_availability::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.AVS/locations/{}/checkQuotaAvailability",
            &operation_config.base_path, subscription_id, location
        );
        let mut req_builder = client.post(uri_str);
        // Only attach a bearer token when a credential was configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(check_quota_availability::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(check_quota_availability::BuildRequestError)?;
        let rsp = client.execute(req).await.context(check_quota_availability::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(check_quota_availability::ResponseBytesError)?;
                let rsp_value: Quota = serde_json::from_slice(&body).context(check_quota_availability::DeserializeError { body })?;
                Ok(rsp_value)
            }
            // Any non-200 status is deserialized as a service `ApiError`.
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(check_quota_availability::ResponseBytesError)?;
                let rsp_value: ApiError = serde_json::from_slice(&body).context(check_quota_availability::DeserializeError { body })?;
                check_quota_availability::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error types for the [`check_quota_availability`] operation.
    pub mod check_quota_availability {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ApiError },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod private_clouds {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
) -> std::result::Result<PrivateCloudList, list::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AVS/privateClouds",
&operation_config.base_path, subscription_id, resource_group_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list::BuildRequestError)?;
let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: PrivateCloudList = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: ApiError = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
list::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse { status_code: StatusCode, value: models::ApiError },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn list_in_subscription(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<PrivateCloudList, list_in_subscription::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.AVS/privateClouds",
&operation_config.base_path, subscription_id
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_in_subscription::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_in_subscription::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_in_subscription::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_in_subscription::ResponseBytesError)?;
let rsp_value: PrivateCloudList = serde_json::from_slice(&body).context(list_in_subscription::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_in_subscription::ResponseBytesError)?;
let rsp_value: ApiError = serde_json::from_slice(&body).context(list_in_subscription::DeserializeError { body })?;
list_in_subscription::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_in_subscription {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse { status_code: StatusCode, value: models::ApiError },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
private_cloud_name: &str,
) -> std::result::Result<PrivateCloud, get::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AVS/privateClouds/{}",
&operation_config.base_path, subscription_id, resource_group_name, private_cloud_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(get::BuildRequestError)?;
let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: PrivateCloud = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: ApiError = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse { status_code: StatusCode, value: models::ApiError },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
private_cloud_name: &str,
private_cloud: &PrivateCloud,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AVS/privateClouds/{}",
&operation_config.base_path, subscription_id, resource_group_name, private_cloud_name
);
let mut req_builder = client.put(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(private_cloud);
let req = req_builder.build().context(create_or_update::BuildRequestError)?;
let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: PrivateCloud = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
StatusCode::CREATED => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: PrivateCloud = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: ApiError = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
create_or_update::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod create_or_update {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200(PrivateCloud),
Created201(PrivateCloud),
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse { status_code: StatusCode, value: models::ApiError },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
private_cloud_name: &str,
private_cloud: &PrivateCloud,
) -> std::result::Result<update::Response, update::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AVS/privateClouds/{}",
&operation_config.base_path, subscription_id, resource_group_name, private_cloud_name
);
let mut req_builder = client.patch(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(update::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(private_cloud);
let req = req_builder.build().context(update::BuildRequestError)?;
let rsp = client.execute(req).await.context(update::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
let rsp_value: PrivateCloud = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
Ok(update::Response::Ok200(rsp_value))
}
StatusCode::CREATED => {
let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
let rsp_value: PrivateCloud = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
Ok(update::Response::Created201(rsp_value))
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
let rsp_value: ApiError = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
update::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod update {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200(PrivateCloud),
Created201(PrivateCloud),
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse { status_code: StatusCode, value: models::ApiError },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
private_cloud_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AVS/privateClouds/{}",
&operation_config.base_path, subscription_id, resource_group_name, private_cloud_name
);
let mut req_builder = client.delete(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(delete::BuildRequestError)?;
let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(delete::Response::Ok200),
StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
let rsp_value: ApiError = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
delete::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod delete {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse { status_code: StatusCode, value: models::ApiError },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
/// Retrieves the admin credentials of a private cloud by POSTing to the
/// `listAdminCredentials` endpoint. A bearer token is attached only when the
/// configuration carries a credential; any non-200 status is decoded as a
/// service `ApiError` and surfaced via `DefaultResponse`.
pub async fn list_admin_credentials(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    private_cloud_name: &str,
) -> std::result::Result<AdminCredentials, list_admin_credentials::Error> {
    let client = &operation_config.client;
    let uri_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AVS/privateClouds/{}/listAdminCredentials",
        &operation_config.base_path, subscription_id, resource_group_name, private_cloud_name
    );
    // Layer auth and the api-version query onto the POST before building.
    let mut builder = client.post(uri_str);
    if let Some(token_credential) = &operation_config.token_credential {
        let token = token_credential
            .get_token(&operation_config.token_credential_resource)
            .await
            .context(list_admin_credentials::GetTokenError)?;
        builder = builder.bearer_auth(token.token.secret());
    }
    builder = builder.query(&[("api-version", &operation_config.api_version)]);
    let request = builder.build().context(list_admin_credentials::BuildRequestError)?;
    let response = client
        .execute(request)
        .await
        .context(list_admin_credentials::ExecuteRequestError)?;
    // Both the success and error paths consume the body, so read it up front.
    let status = response.status();
    let body: bytes::Bytes = response
        .bytes()
        .await
        .context(list_admin_credentials::ResponseBytesError)?;
    if status == StatusCode::OK {
        serde_json::from_slice(&body).context(list_admin_credentials::DeserializeError { body })
    } else {
        let value: ApiError =
            serde_json::from_slice(&body).context(list_admin_credentials::DeserializeError { body })?;
        list_admin_credentials::DefaultResponse {
            status_code: status,
            value,
        }
        .fail()
    }
}
/// Error types for [`list_admin_credentials`].
pub mod list_admin_credentials {
    use crate::{models, models::*};
    use reqwest::StatusCode;
    use snafu::Snafu;
    /// One variant per failure stage, plus the service's default error response.
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        DefaultResponse { status_code: StatusCode, value: models::ApiError },
        BuildRequestError { source: reqwest::Error },
        ExecuteRequestError { source: reqwest::Error },
        ResponseBytesError { source: reqwest::Error },
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
}
pub mod clusters {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
private_cloud_name: &str,
) -> std::result::Result<ClusterList, list::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AVS/privateClouds/{}/clusters",
&operation_config.base_path, subscription_id, resource_group_name, private_cloud_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list::BuildRequestError)?;
let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: ClusterList = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: ApiError = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
list::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
    /// Error types for [`list`].
    pub mod list {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// One variant per failure stage, plus the service's default error response.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ApiError },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Gets a single cluster by name from a private cloud.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        private_cloud_name: &str,
        cluster_name: &str,
    ) -> std::result::Result<Cluster, get::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AVS/privateClouds/{}/clusters/{}",
            &operation_config.base_path, subscription_id, resource_group_name, private_cloud_name, cluster_name
        );
        let mut req_builder = client.get(uri_str);
        // Bearer auth is attached only when a token credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(get::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: Cluster = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                Ok(rsp_value)
            }
            // Any other status carries a service ApiError payload.
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: ApiError = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                get::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error types for [`get`].
    pub mod get {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// One variant per failure stage, plus the service's default error response.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ApiError },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Creates or updates a cluster (PUT with the cluster resource as JSON body).
    /// Returns `Ok200` when the resource already existed and `Created201` when
    /// it was newly created.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        private_cloud_name: &str,
        cluster_name: &str,
        cluster: &Cluster,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AVS/privateClouds/{}/clusters/{}",
            &operation_config.base_path, subscription_id, resource_group_name, private_cloud_name, cluster_name
        );
        let mut req_builder = client.put(uri_str);
        // Bearer auth is attached only when a token credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(create_or_update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(cluster);
        let req = req_builder.build().context(create_or_update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: Cluster = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            StatusCode::CREATED => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: Cluster = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            // Any other status carries a service ApiError payload.
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: ApiError = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                create_or_update::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Response and error types for [`create_or_update`].
    pub mod create_or_update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// 200 = existing resource updated, 201 = resource created.
        #[derive(Debug)]
        pub enum Response {
            Ok200(Cluster),
            Created201(Cluster),
        }
        /// One variant per failure stage, plus the service's default error response.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ApiError },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Partially updates a cluster (PATCH with the cluster resource as JSON body).
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        private_cloud_name: &str,
        cluster_name: &str,
        cluster: &Cluster,
    ) -> std::result::Result<update::Response, update::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AVS/privateClouds/{}/clusters/{}",
            &operation_config.base_path, subscription_id, resource_group_name, private_cloud_name, cluster_name
        );
        let mut req_builder = client.patch(uri_str);
        // Bearer auth is attached only when a token credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(cluster);
        let req = req_builder.build().context(update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(update::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: Cluster = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                Ok(update::Response::Ok200(rsp_value))
            }
            StatusCode::CREATED => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: Cluster = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                Ok(update::Response::Created201(rsp_value))
            }
            // Any other status carries a service ApiError payload.
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: ApiError = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                update::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Response and error types for [`update`].
    pub mod update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// 200 = updated, 201 = created.
        #[derive(Debug)]
        pub enum Response {
            Ok200(Cluster),
            Created201(Cluster),
        }
        /// One variant per failure stage, plus the service's default error response.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ApiError },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
private_cloud_name: &str,
cluster_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AVS/privateClouds/{}/clusters/{}",
&operation_config.base_path, subscription_id, resource_group_name, private_cloud_name, cluster_name
);
let mut req_builder = client.delete(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(delete::BuildRequestError)?;
let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(delete::Response::Ok200),
StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
let rsp_value: ApiError = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
delete::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
    /// Response and error types for [`delete`].
    pub mod delete {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Deletion outcomes: immediate (200/204) or accepted for async
        /// processing (202).
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        /// One variant per failure stage, plus the service's default error response.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ApiError },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
| 47.693002 | 138 | 0.599725 |
d98159d677df87a6e63569e03b44ddca31f3bb82 | 2,025 | use crate::cart::BankingController;
/// MBC5 cartridge mapper state: a 9-bit ROM bank register and 0x20000 bytes
/// of banked external RAM (accessed in 0x2000-byte banks).
pub struct MBC5 {
    rom: Vec<u8>,       // full cartridge ROM image
    rom_bank: u32,      // bank mapped into the 0x4000-0x7FFF window
    ram: Vec<u8>,       // external RAM, banked at 0xA000-0xBFFF
    ram_bank: u32,      // currently selected RAM bank
    ram_enabled: bool,  // gates writes to external RAM
}
impl MBC5 {
    /// Builds an MBC5 mapper around the given ROM image, starting at ROM
    /// bank 1, RAM bank 0, with RAM disabled.
    pub(crate) fn new(data: Vec<u8>) -> MBC5 {
        MBC5 {
            rom: data,
            rom_bank: 1,
            ram: vec![0; 0x20000],
            ram_bank: 0,
            ram_enabled: false,
        }
    }
    /// Convenience constructor that returns the mapper behind the
    /// `BankingController` trait.
    pub fn new_as_bc(data: Vec<u8>) -> impl BankingController {
        MBC5::new(data)
    }
}
impl BankingController for MBC5 {
    /// Reads a byte through the mapper: fixed bank 0 at 0x0000-0x3FFF, the
    /// switchable ROM bank at 0x4000-0x7FFF, and banked external RAM for
    /// everything else (expected to be 0xA000-0xBFFF).
    fn read(&self, address: u16) -> u8 {
        match address {
            0..=0x3FFF => self.rom[address as usize],
            0x4000..=0x7FFF => {
                self.rom[((address - 0x4000) as u32 + (self.rom_bank * 0x4000)) as usize]
            }
            // NOTE(review): RAM reads are not gated on `ram_enabled`; real
            // hardware returns open-bus (commonly 0xFF) when RAM is disabled —
            // confirm whether callers rely on the current behavior.
            _ => self.ram[((0x2000 * self.ram_bank) + (address - 0xA000) as u32) as usize],
        }
    }
    /// Handles writes to the mapper registers. Register layout per the MBC5
    /// specification (Pan Docs):
    ///   0x0000-0x1FFF  RAM enable (0x0A in the low nibble enables, anything
    ///                  else disables)
    ///   0x2000-0x2FFF  low 8 bits of the ROM bank number
    ///   0x3000-0x3FFF  9th bit of the ROM bank number
    ///   0x4000-0x5FFF  RAM bank number (4 bits)
    ///   0x6000-0x7FFF  no function on MBC5
    fn write_rom(&mut self, address: u16, value: u8) {
        match address {
            0..=0x1FFF => {
                // Any value without 0xA in the low nibble disables RAM.
                self.ram_enabled = value & 0xF == 0xA;
            }
            0x2000..=0x2FFF => {
                self.rom_bank = (self.rom_bank & 0x100) | (value as u32);
            }
            0x3000..=0x3FFF => {
                self.rom_bank = (self.rom_bank & 0xFF) | ((value & 0x01) as u32).wrapping_shl(8);
            }
            0x4000..=0x5FFF => {
                self.ram_bank = (value & 0xF) as u32;
            }
            _ => {}
        }
    }
    /// Writes a byte into the selected external RAM bank; ignored while RAM
    /// is disabled.
    fn write_ram(&mut self, address: u16, value: u8) {
        if self.ram_enabled {
            self.ram[((0x2000 * self.ram_bank) + (address - 0xA000) as u32) as usize] = value
        }
    }
    /// Returns a copy of external RAM for battery-backed save files.
    fn get_save_data(&self) -> Vec<u8> {
        self.ram.to_vec()
    }
    /// Restores external RAM from save data, padding or truncating to the
    /// fixed 0x20000-byte size.
    fn load_save_data(&mut self, data: Vec<u8>) {
        self.ram = data;
        self.ram.resize(0x20000, 0)
    }
}
| 26.644737 | 97 | 0.472593 |
2fb40353eec61a7ce61c40ecfa579b6621e29448 | 102,602 | #![doc = "generated by AutoRust"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::models;
/// Root service client: endpoint, credential, OAuth scopes and the HTTP
/// pipeline all requests flow through.
#[derive(Clone)]
pub struct Client {
    endpoint: String,
    credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
    scopes: Vec<String>,
    pipeline: azure_core::Pipeline,
}
/// Builder for [`Client`]; endpoint and scopes are optional and defaulted in
/// `build`.
#[derive(Clone)]
pub struct ClientBuilder {
    credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
    endpoint: Option<String>,
    scopes: Option<Vec<String>>,
}
/// Default ARM endpoint (Azure public cloud) used when none is configured.
pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD;
impl ClientBuilder {
    /// Starts a builder holding only the credential; endpoint and scopes are
    /// defaulted later in [`build`](Self::build).
    pub fn new(credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>) -> Self {
        Self {
            credential,
            endpoint: None,
            scopes: None,
        }
    }
    /// Overrides the service endpoint.
    pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self {
        self.endpoint = Some(endpoint.into());
        self
    }
    /// Overrides the OAuth scopes requested for access tokens.
    pub fn scopes(mut self, scopes: &[&str]) -> Self {
        self.scopes = Some(scopes.iter().map(|scope| scope.to_string()).collect());
        self
    }
    /// Finalizes the builder, falling back to the public-cloud endpoint and a
    /// single `{endpoint}/` scope when unset.
    pub fn build(self) -> Client {
        let endpoint = match self.endpoint {
            Some(endpoint) => endpoint,
            None => DEFAULT_ENDPOINT.to_owned(),
        };
        let scopes = match self.scopes {
            Some(scopes) => scopes,
            None => vec![format!("{}/", endpoint)],
        };
        Client::new(endpoint, self.credential, scopes)
    }
}
impl Client {
    /// Base URL all operation paths are appended to.
    pub(crate) fn endpoint(&self) -> &str {
        self.endpoint.as_str()
    }
    /// Credential used to fetch bearer tokens for requests.
    pub(crate) fn token_credential(&self) -> &dyn azure_core::auth::TokenCredential {
        self.credential.as_ref()
    }
    /// Configured OAuth scopes, borrowed as `&str`.
    pub(crate) fn scopes(&self) -> Vec<&str> {
        self.scopes.iter().map(String::as_str).collect()
    }
    /// Sends a request through the client's pipeline with a fresh context.
    pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> azure_core::error::Result<azure_core::Response> {
        let mut context = azure_core::Context::default();
        let mut request = request.into();
        self.pipeline.send(&mut context, &mut request).await
    }
    /// Constructs a client with an explicit endpoint, credential and scopes;
    /// the pipeline is built with default options and no extra policies.
    pub fn new(
        endpoint: impl Into<String>,
        credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
        scopes: Vec<String>,
    ) -> Self {
        let endpoint = endpoint.into();
        let pipeline = azure_core::Pipeline::new(
            option_env!("CARGO_PKG_NAME"),
            option_env!("CARGO_PKG_VERSION"),
            azure_core::ClientOptions::default(),
            Vec::new(),
            Vec::new(),
        );
        Self {
            endpoint,
            credential,
            scopes,
            pipeline,
        }
    }
    // Operation-group accessors below each clone the root client into a
    // group-specific wrapper.
    pub fn artifacts(&self) -> artifacts::Client {
        artifacts::Client(self.clone())
    }
    pub fn assignment_operations(&self) -> assignment_operations::Client {
        assignment_operations::Client(self.clone())
    }
    pub fn assignments(&self) -> assignments::Client {
        assignments::Client(self.clone())
    }
    pub fn blueprints(&self) -> blueprints::Client {
        blueprints::Client(self.clone())
    }
    pub fn published_artifacts(&self) -> published_artifacts::Client {
        published_artifacts::Client(self.clone())
    }
    pub fn published_blueprints(&self) -> published_blueprints::Client {
        published_blueprints::Client(self.clone())
    }
}
/// Crate-wide aggregate error: one transparent variant per service operation,
/// so callers can match a single type across all operation groups.
#[non_exhaustive]
#[derive(Debug, thiserror :: Error)]
#[allow(non_camel_case_types)]
pub enum Error {
    #[error(transparent)]
    Blueprints_Get(#[from] blueprints::get::Error),
    #[error(transparent)]
    Blueprints_CreateOrUpdate(#[from] blueprints::create_or_update::Error),
    #[error(transparent)]
    Blueprints_Delete(#[from] blueprints::delete::Error),
    #[error(transparent)]
    Blueprints_List(#[from] blueprints::list::Error),
    #[error(transparent)]
    Artifacts_Get(#[from] artifacts::get::Error),
    #[error(transparent)]
    Artifacts_CreateOrUpdate(#[from] artifacts::create_or_update::Error),
    #[error(transparent)]
    Artifacts_Delete(#[from] artifacts::delete::Error),
    #[error(transparent)]
    Artifacts_List(#[from] artifacts::list::Error),
    #[error(transparent)]
    PublishedBlueprints_Get(#[from] published_blueprints::get::Error),
    #[error(transparent)]
    PublishedBlueprints_Create(#[from] published_blueprints::create::Error),
    #[error(transparent)]
    PublishedBlueprints_Delete(#[from] published_blueprints::delete::Error),
    #[error(transparent)]
    PublishedBlueprints_List(#[from] published_blueprints::list::Error),
    #[error(transparent)]
    PublishedArtifacts_Get(#[from] published_artifacts::get::Error),
    #[error(transparent)]
    PublishedArtifacts_List(#[from] published_artifacts::list::Error),
    #[error(transparent)]
    Assignments_Get(#[from] assignments::get::Error),
    #[error(transparent)]
    Assignments_CreateOrUpdate(#[from] assignments::create_or_update::Error),
    #[error(transparent)]
    Assignments_Delete(#[from] assignments::delete::Error),
    #[error(transparent)]
    Assignments_WhoIsBlueprint(#[from] assignments::who_is_blueprint::Error),
    #[error(transparent)]
    Assignments_List(#[from] assignments::list::Error),
    #[error(transparent)]
    AssignmentOperations_List(#[from] assignment_operations::list::Error),
    #[error(transparent)]
    AssignmentOperations_Get(#[from] assignment_operations::get::Error),
}
/// Operations on blueprint definitions.
pub mod blueprints {
    use super::models;
    /// Operation-group client wrapping the root client.
    pub struct Client(pub(crate) super::Client);
    impl Client {
        /// Gets a blueprint definition by scope and name.
        pub fn get(&self, resource_scope: impl Into<String>, blueprint_name: impl Into<String>) -> get::Builder {
            get::Builder {
                client: self.0.clone(),
                resource_scope: resource_scope.into(),
                blueprint_name: blueprint_name.into(),
            }
        }
        /// Creates or updates a blueprint definition.
        pub fn create_or_update(
            &self,
            resource_scope: impl Into<String>,
            blueprint_name: impl Into<String>,
            blueprint: impl Into<models::Blueprint>,
        ) -> create_or_update::Builder {
            create_or_update::Builder {
                client: self.0.clone(),
                resource_scope: resource_scope.into(),
                blueprint_name: blueprint_name.into(),
                blueprint: blueprint.into(),
            }
        }
        /// Deletes a blueprint definition.
        pub fn delete(&self, resource_scope: impl Into<String>, blueprint_name: impl Into<String>) -> delete::Builder {
            delete::Builder {
                client: self.0.clone(),
                resource_scope: resource_scope.into(),
                blueprint_name: blueprint_name.into(),
            }
        }
        /// Lists blueprint definitions within a scope.
        pub fn list(&self, resource_scope: impl Into<String>) -> list::Builder {
            list::Builder {
                client: self.0.clone(),
                resource_scope: resource_scope.into(),
            }
        }
    }
    /// Builder and error types for the blueprint `get` operation.
    pub mod get {
        use super::models;
        /// One variant per failure stage, plus the service's default error response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::error::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::error::Error),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Captured parameters for the request; consumed by `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_scope: String,
            pub(crate) blueprint_name: String,
        }
        impl Builder {
            /// Builds and sends the GET request, returning the blueprint on 200
            /// or the service's `CloudError` otherwise.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Blueprint, Error>> {
                Box::pin(async move {
                    let url_str = &format!(
                        "{}/{}/providers/Microsoft.Blueprint/blueprints/{}",
                        self.client.endpoint(),
                        &self.resource_scope,
                        &self.blueprint_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::GET);
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::Blueprint =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudError =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Err(Error::DefaultResponse {
                                status_code,
                                value: rsp_value,
                            })
                        }
                    }
                })
            }
        }
    }
    /// Builder and error types for the blueprint `create_or_update` operation.
    pub mod create_or_update {
        use super::models;
        /// One variant per failure stage, plus the service's default error response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::error::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::error::Error),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Captured parameters (including the blueprint payload); consumed by
        /// `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_scope: String,
            pub(crate) blueprint_name: String,
            pub(crate) blueprint: models::Blueprint,
        }
        impl Builder {
            /// Builds and sends the PUT request with the blueprint as a JSON
            /// body; 201 carries the stored blueprint back.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Blueprint, Error>> {
                Box::pin(async move {
                    let url_str = &format!(
                        "{}/{}/providers/Microsoft.Blueprint/blueprints/{}",
                        self.client.endpoint(),
                        &self.resource_scope,
                        &self.blueprint_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::PUT);
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                    req_builder = req_builder.header("content-type", "application/json");
                    let req_body = azure_core::to_json(&self.blueprint).map_err(Error::Serialize)?;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::CREATED => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::Blueprint =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudError =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Err(Error::DefaultResponse {
                                status_code,
                                value: rsp_value,
                            })
                        }
                    }
                })
            }
        }
    }
    /// Builder, response and error types for the blueprint `delete` operation.
    pub mod delete {
        use super::models;
        /// 200 returns the deleted blueprint; 204 means nothing to delete.
        #[derive(Debug)]
        pub enum Response {
            Ok200(models::Blueprint),
            NoContent204,
        }
        /// One variant per failure stage, plus the service's default error response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::error::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::error::Error),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Captured parameters for the request; consumed by `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_scope: String,
            pub(crate) blueprint_name: String,
        }
        impl Builder {
            /// Builds and sends the DELETE request.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
                Box::pin(async move {
                    let url_str = &format!(
                        "{}/{}/providers/Microsoft.Blueprint/blueprints/{}",
                        self.client.endpoint(),
                        &self.resource_scope,
                        &self.blueprint_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::DELETE);
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::Blueprint =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(Response::Ok200(rsp_value))
                        }
                        http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudError =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Err(Error::DefaultResponse {
                                status_code,
                                value: rsp_value,
                            })
                        }
                    }
                })
            }
        }
    }
    /// Builder and error types for the blueprint `list` operation.
    pub mod list {
        use super::models;
        /// One variant per failure stage, plus the service's default error response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::error::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::error::Error),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Captured parameters for the request; consumed by `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_scope: String,
        }
        impl Builder {
            /// Builds and sends the GET request for the blueprint collection.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::BlueprintList, Error>> {
                Box::pin(async move {
                    let url_str = &format!(
                        "{}/{}/providers/Microsoft.Blueprint/blueprints",
                        self.client.endpoint(),
                        &self.resource_scope
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::GET);
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::BlueprintList =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudError =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Err(Error::DefaultResponse {
                                status_code,
                                value: rsp_value,
                            })
                        }
                    }
                })
            }
        }
    }
}
pub mod artifacts {
use super::models;
    /// Operation-group client for blueprint artifacts, wrapping the root client.
    pub struct Client(pub(crate) super::Client);
    impl Client {
        /// Gets an artifact of a blueprint by name.
        pub fn get(
            &self,
            resource_scope: impl Into<String>,
            blueprint_name: impl Into<String>,
            artifact_name: impl Into<String>,
        ) -> get::Builder {
            get::Builder {
                client: self.0.clone(),
                resource_scope: resource_scope.into(),
                blueprint_name: blueprint_name.into(),
                artifact_name: artifact_name.into(),
            }
        }
        /// Creates or updates an artifact within a blueprint.
        pub fn create_or_update(
            &self,
            resource_scope: impl Into<String>,
            blueprint_name: impl Into<String>,
            artifact_name: impl Into<String>,
            artifact: impl Into<models::Artifact>,
        ) -> create_or_update::Builder {
            create_or_update::Builder {
                client: self.0.clone(),
                resource_scope: resource_scope.into(),
                blueprint_name: blueprint_name.into(),
                artifact_name: artifact_name.into(),
                artifact: artifact.into(),
            }
        }
        /// Deletes an artifact from a blueprint.
        pub fn delete(
            &self,
            resource_scope: impl Into<String>,
            blueprint_name: impl Into<String>,
            artifact_name: impl Into<String>,
        ) -> delete::Builder {
            delete::Builder {
                client: self.0.clone(),
                resource_scope: resource_scope.into(),
                blueprint_name: blueprint_name.into(),
                artifact_name: artifact_name.into(),
            }
        }
        /// Lists the artifacts of a blueprint.
        pub fn list(&self, resource_scope: impl Into<String>, blueprint_name: impl Into<String>) -> list::Builder {
            list::Builder {
                client: self.0.clone(),
                resource_scope: resource_scope.into(),
                blueprint_name: blueprint_name.into(),
            }
        }
    }
    pub mod get {
        use super::models;
        /// Errors produced while building, sending, or decoding the `get` request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with a non-`200` status; `value` is the parsed error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::error::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::error::Error),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Request builder; constructed by `Client::get` and executed via `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_scope: String,
            pub(crate) blueprint_name: String,
            pub(crate) artifact_name: String,
        }
        impl Builder {
            /// Sends `GET {endpoint}/{resource_scope}/providers/Microsoft.Blueprint/blueprints/{blueprint_name}/artifacts/{artifact_name}`
            /// (api-version 2018-11-01-preview) and deserializes a `200 OK` body into `models::Artifact`.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Artifact, Error>> {
                Box::pin(async move {
                    // Assemble the request URL from the client endpoint and path parameters.
                    let url_str = &format!(
                        "{}/{}/providers/Microsoft.Blueprint/blueprints/{}/artifacts/{}",
                        self.client.endpoint(),
                        &self.resource_scope,
                        &self.blueprint_name,
                        &self.artifact_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::GET);
                    // Acquire a bearer token covering the client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                    // GET carries no request body.
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    // Response headers are bound but not used by this operation.
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::Artifact =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        // Every other status surfaces as `DefaultResponse` with the parsed `CloudError` body.
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudError =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Err(Error::DefaultResponse {
                                status_code,
                                value: rsp_value,
                            })
                        }
                    }
                })
            }
        }
    }
    pub mod create_or_update {
        use super::models;
        /// Errors produced while building, sending, or decoding the `create_or_update` request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with an unexpected status; `value` is the parsed error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::error::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::error::Error),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Request builder; constructed by `Client::create_or_update` and executed via `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_scope: String,
            pub(crate) blueprint_name: String,
            pub(crate) artifact_name: String,
            pub(crate) artifact: models::Artifact,
        }
        impl Builder {
            /// Sends `PUT .../blueprints/{blueprint_name}/artifacts/{artifact_name}` with the artifact
            /// serialized as JSON, and deserializes a `201 Created` body into `models::Artifact`.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Artifact, Error>> {
                Box::pin(async move {
                    // Assemble the request URL from the client endpoint and path parameters.
                    let url_str = &format!(
                        "{}/{}/providers/Microsoft.Blueprint/blueprints/{}/artifacts/{}",
                        self.client.endpoint(),
                        &self.resource_scope,
                        &self.blueprint_name,
                        &self.artifact_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::PUT);
                    // Acquire a bearer token covering the client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                    // The artifact payload is always sent as JSON.
                    req_builder = req_builder.header("content-type", "application/json");
                    let req_body = azure_core::to_json(&self.artifact).map_err(Error::Serialize)?;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    // Response headers are bound but not used by this operation.
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::CREATED => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::Artifact =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        // Every other status surfaces as `DefaultResponse` with the parsed `CloudError` body.
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudError =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Err(Error::DefaultResponse {
                                status_code,
                                value: rsp_value,
                            })
                        }
                    }
                })
            }
        }
    }
    pub mod delete {
        use super::models;
        /// Successful outcomes: `200 OK` carries the deleted artifact; `204 No Content`
        /// means there was nothing to delete.
        #[derive(Debug)]
        pub enum Response {
            Ok200(models::Artifact),
            NoContent204,
        }
        /// Errors produced while building, sending, or decoding the `delete` request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with an unexpected status; `value` is the parsed error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::error::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::error::Error),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Request builder; constructed by `Client::delete` and executed via `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_scope: String,
            pub(crate) blueprint_name: String,
            pub(crate) artifact_name: String,
        }
        impl Builder {
            /// Sends `DELETE .../blueprints/{blueprint_name}/artifacts/{artifact_name}` and maps
            /// `200`/`204` onto the [`Response`] enum above.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
                Box::pin(async move {
                    // Assemble the request URL from the client endpoint and path parameters.
                    let url_str = &format!(
                        "{}/{}/providers/Microsoft.Blueprint/blueprints/{}/artifacts/{}",
                        self.client.endpoint(),
                        &self.resource_scope,
                        &self.blueprint_name,
                        &self.artifact_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::DELETE);
                    // Acquire a bearer token covering the client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                    // DELETE carries no request body.
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    // Response headers are bound but not used by this operation.
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::Artifact =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(Response::Ok200(rsp_value))
                        }
                        http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
                        // Every other status surfaces as `DefaultResponse` with the parsed `CloudError` body.
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudError =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Err(Error::DefaultResponse {
                                status_code,
                                value: rsp_value,
                            })
                        }
                    }
                })
            }
        }
    }
    pub mod list {
        use super::models;
        /// Errors produced while building, sending, or decoding the `list` request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with a non-`200` status; `value` is the parsed error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::error::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::error::Error),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Request builder; constructed by `Client::list` and executed via `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_scope: String,
            pub(crate) blueprint_name: String,
        }
        impl Builder {
            /// Sends `GET .../blueprints/{blueprint_name}/artifacts` and deserializes a `200 OK`
            /// body into `models::ArtifactList`.
            /// NOTE(review): only the first page is fetched here; no `next_link` follow-up is visible in this block.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ArtifactList, Error>> {
                Box::pin(async move {
                    // Assemble the request URL from the client endpoint and path parameters.
                    let url_str = &format!(
                        "{}/{}/providers/Microsoft.Blueprint/blueprints/{}/artifacts",
                        self.client.endpoint(),
                        &self.resource_scope,
                        &self.blueprint_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::GET);
                    // Acquire a bearer token covering the client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                    // GET carries no request body.
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    // Response headers are bound but not used by this operation.
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::ArtifactList =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        // Every other status surfaces as `DefaultResponse` with the parsed `CloudError` body.
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudError =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Err(Error::DefaultResponse {
                                status_code,
                                value: rsp_value,
                            })
                        }
                    }
                })
            }
        }
    }
}
pub mod published_blueprints {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
resource_scope: impl Into<String>,
blueprint_name: impl Into<String>,
version_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
resource_scope: resource_scope.into(),
blueprint_name: blueprint_name.into(),
version_id: version_id.into(),
}
}
pub fn create(
&self,
resource_scope: impl Into<String>,
blueprint_name: impl Into<String>,
version_id: impl Into<String>,
) -> create::Builder {
create::Builder {
client: self.0.clone(),
resource_scope: resource_scope.into(),
blueprint_name: blueprint_name.into(),
version_id: version_id.into(),
published_blueprint: None,
}
}
pub fn delete(
&self,
resource_scope: impl Into<String>,
blueprint_name: impl Into<String>,
version_id: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
resource_scope: resource_scope.into(),
blueprint_name: blueprint_name.into(),
version_id: version_id.into(),
}
}
pub fn list(&self, resource_scope: impl Into<String>, blueprint_name: impl Into<String>) -> list::Builder {
list::Builder {
client: self.0.clone(),
resource_scope: resource_scope.into(),
blueprint_name: blueprint_name.into(),
}
}
}
    pub mod get {
        use super::models;
        /// Errors produced while building, sending, or decoding the `get` request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with a non-`200` status; `value` is the parsed error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::error::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::error::Error),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Request builder; constructed by `Client::get` and executed via `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_scope: String,
            pub(crate) blueprint_name: String,
            pub(crate) version_id: String,
        }
        impl Builder {
            /// Sends `GET .../blueprints/{blueprint_name}/versions/{version_id}` and deserializes
            /// a `200 OK` body into `models::PublishedBlueprint`.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::PublishedBlueprint, Error>> {
                Box::pin(async move {
                    // Assemble the request URL from the client endpoint and path parameters.
                    let url_str = &format!(
                        "{}/{}/providers/Microsoft.Blueprint/blueprints/{}/versions/{}",
                        self.client.endpoint(),
                        &self.resource_scope,
                        &self.blueprint_name,
                        &self.version_id
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::GET);
                    // Acquire a bearer token covering the client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                    // GET carries no request body.
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    // Response headers are bound but not used by this operation.
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::PublishedBlueprint =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        // Every other status surfaces as `DefaultResponse` with the parsed `CloudError` body.
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudError =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Err(Error::DefaultResponse {
                                status_code,
                                value: rsp_value,
                            })
                        }
                    }
                })
            }
        }
    }
    pub mod create {
        use super::models;
        /// Errors produced while building, sending, or decoding the `create` request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with an unexpected status; `value` is the parsed error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::error::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::error::Error),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Request builder; constructed by `Client::create` and executed via `into_future`.
        /// The request body is optional and set with the `published_blueprint` method.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_scope: String,
            pub(crate) blueprint_name: String,
            pub(crate) version_id: String,
            pub(crate) published_blueprint: Option<models::PublishedBlueprint>,
        }
        impl Builder {
            /// Attaches the optional request payload; when set it is sent as the JSON body.
            pub fn published_blueprint(mut self, published_blueprint: impl Into<models::PublishedBlueprint>) -> Self {
                self.published_blueprint = Some(published_blueprint.into());
                self
            }
            /// Sends `PUT .../blueprints/{blueprint_name}/versions/{version_id}` (body only if one
            /// was attached) and deserializes a `201 Created` body into `models::PublishedBlueprint`.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::PublishedBlueprint, Error>> {
                Box::pin(async move {
                    // Assemble the request URL from the client endpoint and path parameters.
                    let url_str = &format!(
                        "{}/{}/providers/Microsoft.Blueprint/blueprints/{}/versions/{}",
                        self.client.endpoint(),
                        &self.resource_scope,
                        &self.blueprint_name,
                        &self.version_id
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::PUT);
                    // Acquire a bearer token covering the client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                    // The content-type header is only added when a JSON payload is present.
                    let req_body = if let Some(published_blueprint) = &self.published_blueprint {
                        req_builder = req_builder.header("content-type", "application/json");
                        azure_core::to_json(published_blueprint).map_err(Error::Serialize)?
                    } else {
                        azure_core::EMPTY_BODY
                    };
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    // Response headers are bound but not used by this operation.
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::CREATED => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::PublishedBlueprint =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        // Every other status surfaces as `DefaultResponse` with the parsed `CloudError` body.
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudError =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Err(Error::DefaultResponse {
                                status_code,
                                value: rsp_value,
                            })
                        }
                    }
                })
            }
        }
    }
    pub mod delete {
        use super::models;
        /// Successful outcomes: `200 OK` carries the deleted published blueprint; `204 No Content`
        /// means there was nothing to delete.
        #[derive(Debug)]
        pub enum Response {
            Ok200(models::PublishedBlueprint),
            NoContent204,
        }
        /// Errors produced while building, sending, or decoding the `delete` request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with an unexpected status; `value` is the parsed error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::error::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::error::Error),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Request builder; constructed by `Client::delete` and executed via `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_scope: String,
            pub(crate) blueprint_name: String,
            pub(crate) version_id: String,
        }
        impl Builder {
            /// Sends `DELETE .../blueprints/{blueprint_name}/versions/{version_id}` and maps
            /// `200`/`204` onto the [`Response`] enum above.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
                Box::pin(async move {
                    // Assemble the request URL from the client endpoint and path parameters.
                    let url_str = &format!(
                        "{}/{}/providers/Microsoft.Blueprint/blueprints/{}/versions/{}",
                        self.client.endpoint(),
                        &self.resource_scope,
                        &self.blueprint_name,
                        &self.version_id
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::DELETE);
                    // Acquire a bearer token covering the client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                    // DELETE carries no request body.
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    // Response headers are bound but not used by this operation.
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::PublishedBlueprint =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(Response::Ok200(rsp_value))
                        }
                        http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
                        // Every other status surfaces as `DefaultResponse` with the parsed `CloudError` body.
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudError =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Err(Error::DefaultResponse {
                                status_code,
                                value: rsp_value,
                            })
                        }
                    }
                })
            }
        }
    }
    pub mod list {
        use super::models;
        /// Errors produced while building, sending, or decoding the `list` request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with a non-`200` status; `value` is the parsed error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::error::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::error::Error),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Request builder; constructed by `Client::list` and executed via `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_scope: String,
            pub(crate) blueprint_name: String,
        }
        impl Builder {
            /// Sends `GET .../blueprints/{blueprint_name}/versions` and deserializes a `200 OK`
            /// body into `models::PublishedBlueprintList`.
            /// NOTE(review): only the first page is fetched here; no `next_link` follow-up is visible in this block.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::PublishedBlueprintList, Error>> {
                Box::pin(async move {
                    // Assemble the request URL from the client endpoint and path parameters.
                    let url_str = &format!(
                        "{}/{}/providers/Microsoft.Blueprint/blueprints/{}/versions",
                        self.client.endpoint(),
                        &self.resource_scope,
                        &self.blueprint_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::GET);
                    // Acquire a bearer token covering the client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                    // GET carries no request body.
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    // Response headers are bound but not used by this operation.
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::PublishedBlueprintList =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        // Every other status surfaces as `DefaultResponse` with the parsed `CloudError` body.
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudError =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Err(Error::DefaultResponse {
                                status_code,
                                value: rsp_value,
                            })
                        }
                    }
                })
            }
        }
    }
}
pub mod published_artifacts {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
resource_scope: impl Into<String>,
blueprint_name: impl Into<String>,
version_id: impl Into<String>,
artifact_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
resource_scope: resource_scope.into(),
blueprint_name: blueprint_name.into(),
version_id: version_id.into(),
artifact_name: artifact_name.into(),
}
}
pub fn list(
&self,
resource_scope: impl Into<String>,
blueprint_name: impl Into<String>,
version_id: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
resource_scope: resource_scope.into(),
blueprint_name: blueprint_name.into(),
version_id: version_id.into(),
}
}
}
    pub mod get {
        use super::models;
        /// Errors produced while building, sending, or decoding the `get` request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with a non-`200` status; `value` is the parsed error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::error::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::error::Error),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Request builder; constructed by `Client::get` and executed via `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_scope: String,
            pub(crate) blueprint_name: String,
            pub(crate) version_id: String,
            pub(crate) artifact_name: String,
        }
        impl Builder {
            /// Sends `GET .../blueprints/{blueprint_name}/versions/{version_id}/artifacts/{artifact_name}`
            /// and deserializes a `200 OK` body into `models::Artifact`.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Artifact, Error>> {
                Box::pin(async move {
                    // Assemble the request URL from the client endpoint and path parameters.
                    let url_str = &format!(
                        "{}/{}/providers/Microsoft.Blueprint/blueprints/{}/versions/{}/artifacts/{}",
                        self.client.endpoint(),
                        &self.resource_scope,
                        &self.blueprint_name,
                        &self.version_id,
                        &self.artifact_name
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::GET);
                    // Acquire a bearer token covering the client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                    // GET carries no request body.
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    // Response headers are bound but not used by this operation.
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::Artifact =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        // Every other status surfaces as `DefaultResponse` with the parsed `CloudError` body.
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudError =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Err(Error::DefaultResponse {
                                status_code,
                                value: rsp_value,
                            })
                        }
                    }
                })
            }
        }
    }
    pub mod list {
        use super::models;
        /// Errors produced while building, sending, or decoding the `list` request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with a non-`200` status; `value` is the parsed error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL")]
            ParseUrl(#[source] url::ParseError),
            #[error("Failed to build request")]
            BuildRequest(#[source] http::Error),
            #[error("Failed to serialize request body")]
            Serialize(#[source] serde_json::Error),
            #[error("Failed to get access token")]
            GetToken(#[source] azure_core::Error),
            #[error("Failed to execute request")]
            SendRequest(#[source] azure_core::error::Error),
            #[error("Failed to get response bytes")]
            ResponseBytes(#[source] azure_core::error::Error),
            #[error("Failed to deserialize response, body: {1:?}")]
            Deserialize(#[source] serde_json::Error, bytes::Bytes),
        }
        /// Request builder; constructed by `Client::list` and executed via `into_future`.
        #[derive(Clone)]
        pub struct Builder {
            pub(crate) client: super::super::Client,
            pub(crate) resource_scope: String,
            pub(crate) blueprint_name: String,
            pub(crate) version_id: String,
        }
        impl Builder {
            /// Sends `GET .../blueprints/{blueprint_name}/versions/{version_id}/artifacts` and
            /// deserializes a `200 OK` body into `models::ArtifactList`.
            /// NOTE(review): only the first page is fetched here; no `next_link` follow-up is visible in this block.
            pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ArtifactList, Error>> {
                Box::pin(async move {
                    // Assemble the request URL from the client endpoint and path parameters.
                    let url_str = &format!(
                        "{}/{}/providers/Microsoft.Blueprint/blueprints/{}/versions/{}/artifacts",
                        self.client.endpoint(),
                        &self.resource_scope,
                        &self.blueprint_name,
                        &self.version_id
                    );
                    let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                    let mut req_builder = http::request::Builder::new();
                    req_builder = req_builder.method(http::Method::GET);
                    // Acquire a bearer token covering the client's configured scopes.
                    let credential = self.client.token_credential();
                    let token_response = credential
                        .get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                    // GET carries no request body.
                    let req_body = azure_core::EMPTY_BODY;
                    req_builder = req_builder.uri(url.as_str());
                    let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                    let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    // Response headers are bound but not used by this operation.
                    let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                    match rsp_status {
                        http::StatusCode::OK => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::ArtifactList =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Ok(rsp_value)
                        }
                        // Every other status surfaces as `DefaultResponse` with the parsed `CloudError` body.
                        status_code => {
                            let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                            let rsp_value: models::CloudError =
                                serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                            Err(Error::DefaultResponse {
                                status_code,
                                value: rsp_value,
                            })
                        }
                    }
                })
            }
        }
    }
}
pub mod assignments {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(&self, resource_scope: impl Into<String>, assignment_name: impl Into<String>) -> get::Builder {
get::Builder {
client: self.0.clone(),
resource_scope: resource_scope.into(),
assignment_name: assignment_name.into(),
}
}
pub fn create_or_update(
&self,
resource_scope: impl Into<String>,
assignment_name: impl Into<String>,
assignment: impl Into<models::Assignment>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
resource_scope: resource_scope.into(),
assignment_name: assignment_name.into(),
assignment: assignment.into(),
}
}
pub fn delete(&self, resource_scope: impl Into<String>, assignment_name: impl Into<String>) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
resource_scope: resource_scope.into(),
assignment_name: assignment_name.into(),
delete_behavior: None,
}
}
pub fn who_is_blueprint(&self, resource_scope: impl Into<String>, assignment_name: impl Into<String>) -> who_is_blueprint::Builder {
who_is_blueprint::Builder {
client: self.0.clone(),
resource_scope: resource_scope.into(),
assignment_name: assignment_name.into(),
}
}
pub fn list(&self, resource_scope: impl Into<String>) -> list::Builder {
list::Builder {
client: self.0.clone(),
resource_scope: resource_scope.into(),
}
}
}
/// GET a single blueprint assignment.
pub mod get {
    use super::models;
    /// Failure modes of this operation, roughly in pipeline order: URL
    /// parsing, request building, auth, send, body collection,
    /// deserialization. Any non-200 status is surfaced as `DefaultResponse`
    /// with the service's `CloudError` payload.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::error::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::error::Error),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Captured request parameters; call `into_future` to execute.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_scope: String,
        pub(crate) assignment_name: String,
    }
    impl Builder {
        /// Sends `GET {endpoint}/{resource_scope}/providers/Microsoft.Blueprint/blueprintAssignments/{assignment_name}`
        /// and deserializes a 200 response into `models::Assignment`.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Assignment, Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/{}/providers/Microsoft.Blueprint/blueprintAssignments/{}",
                    self.client.endpoint(),
                    &self.resource_scope,
                    &self.assignment_name
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::GET);
                // A fresh bearer token is requested per call, using the
                // client's configured scopes joined by spaces.
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                let req_body = azure_core::EMPTY_BODY;
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // Response headers are deconstructed but unused by this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::Assignment =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Ok(rsp_value)
                    }
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::CloudError =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Err(Error::DefaultResponse {
                            status_code,
                            value: rsp_value,
                        })
                    }
                }
            })
        }
    }
}
/// PUT (create or update) a blueprint assignment.
pub mod create_or_update {
    use super::models;
    /// Failure modes of this operation; any status other than 201 is
    /// surfaced as `DefaultResponse` with the service's `CloudError` body.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::error::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::error::Error),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Captured request parameters, including the assignment payload to
    /// serialize as the JSON request body; call `into_future` to execute.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_scope: String,
        pub(crate) assignment_name: String,
        pub(crate) assignment: models::Assignment,
    }
    impl Builder {
        /// Sends `PUT {endpoint}/{resource_scope}/providers/Microsoft.Blueprint/blueprintAssignments/{assignment_name}`
        /// with the assignment as a JSON body. Only a 201 (Created) response
        /// is treated as success and deserialized into `models::Assignment`.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Assignment, Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/{}/providers/Microsoft.Blueprint/blueprintAssignments/{}",
                    self.client.endpoint(),
                    &self.resource_scope,
                    &self.assignment_name
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::PUT);
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                req_builder = req_builder.header("content-type", "application/json");
                let req_body = azure_core::to_json(&self.assignment).map_err(Error::Serialize)?;
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // Response headers are deconstructed but unused by this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::CREATED => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::Assignment =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Ok(rsp_value)
                    }
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::CloudError =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Err(Error::DefaultResponse {
                            status_code,
                            value: rsp_value,
                        })
                    }
                }
            })
        }
    }
}
/// DELETE a blueprint assignment.
pub mod delete {
    use super::models;
    /// Successful outcomes: the service either starts the deletion (202,
    /// returning the assignment being deleted) or reports that nothing
    /// existed to delete (204, no body).
    #[derive(Debug)]
    pub enum Response {
        Accepted202(models::Assignment),
        NoContent204,
    }
    /// Failure modes of this operation; any status other than 202/204 is
    /// surfaced as `DefaultResponse` with the service's `CloudError` body.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::error::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::error::Error),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Captured request parameters; call `into_future` to execute.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_scope: String,
        pub(crate) assignment_name: String,
        pub(crate) delete_behavior: Option<String>,
    }
    impl Builder {
        /// Sets the optional `deleteBehavior` query parameter.
        pub fn delete_behavior(mut self, delete_behavior: impl Into<String>) -> Self {
            self.delete_behavior = Some(delete_behavior.into());
            self
        }
        /// Sends `DELETE {endpoint}/{resource_scope}/providers/Microsoft.Blueprint/blueprintAssignments/{assignment_name}`
        /// and maps 202/204 to the `Response` variants above.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/{}/providers/Microsoft.Blueprint/blueprintAssignments/{}",
                    self.client.endpoint(),
                    &self.resource_scope,
                    &self.assignment_name
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::DELETE);
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                // Only append deleteBehavior when the caller set it.
                if let Some(delete_behavior) = &self.delete_behavior {
                    url.query_pairs_mut().append_pair("deleteBehavior", delete_behavior);
                }
                let req_body = azure_core::EMPTY_BODY;
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // Response headers are deconstructed but unused by this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::ACCEPTED => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::Assignment =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Ok(Response::Accepted202(rsp_value))
                    }
                    http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::CloudError =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Err(Error::DefaultResponse {
                            status_code,
                            value: rsp_value,
                        })
                    }
                }
            })
        }
    }
}
/// POST `.../whoIsBlueprint` for a blueprint assignment.
pub mod who_is_blueprint {
    use super::models;
    /// Failure modes of this operation; any non-200 status is surfaced as
    /// `DefaultResponse` with the service's `CloudError` body.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::error::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::error::Error),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Captured request parameters; call `into_future` to execute.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_scope: String,
        pub(crate) assignment_name: String,
    }
    impl Builder {
        /// Sends `POST .../blueprintAssignments/{assignment_name}/whoIsBlueprint`
        /// with an empty body and deserializes a 200 response into
        /// `models::WhoIsBlueprintContract`.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::WhoIsBlueprintContract, Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/{}/providers/Microsoft.Blueprint/blueprintAssignments/{}/whoIsBlueprint",
                    self.client.endpoint(),
                    &self.resource_scope,
                    &self.assignment_name
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::POST);
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                let req_body = azure_core::EMPTY_BODY;
                // Bodyless POST: an explicit Content-Length of 0 is set.
                req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // Response headers are deconstructed but unused by this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::WhoIsBlueprintContract =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Ok(rsp_value)
                    }
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::CloudError =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Err(Error::DefaultResponse {
                            status_code,
                            value: rsp_value,
                        })
                    }
                }
            })
        }
    }
}
/// GET all blueprint assignments within a scope.
pub mod list {
    use super::models;
    /// Failure modes of this operation; any non-200 status is surfaced as
    /// `DefaultResponse` with the service's `CloudError` body.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::error::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::error::Error),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Captured request parameters; call `into_future` to execute.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_scope: String,
    }
    impl Builder {
        /// Sends `GET {endpoint}/{resource_scope}/providers/Microsoft.Blueprint/blueprintAssignments`
        /// and deserializes a 200 response into `models::AssignmentList`.
        /// NOTE(review): this returns only the first page; no nextLink
        /// continuation is performed here — confirm paging is handled by callers.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::AssignmentList, Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/{}/providers/Microsoft.Blueprint/blueprintAssignments",
                    self.client.endpoint(),
                    &self.resource_scope
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::GET);
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                let req_body = azure_core::EMPTY_BODY;
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // Response headers are deconstructed but unused by this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::AssignmentList =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Ok(rsp_value)
                    }
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::CloudError =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Err(Error::DefaultResponse {
                            status_code,
                            value: rsp_value,
                        })
                    }
                }
            })
        }
    }
}
}
pub mod assignment_operations {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
    /// Prepares a request that lists all operations performed on a
    /// blueprint assignment.
    pub fn list(&self, resource_scope: impl Into<String>, assignment_name: impl Into<String>) -> list::Builder {
        list::Builder {
            client: self.0.clone(),
            resource_scope: resource_scope.into(),
            assignment_name: assignment_name.into(),
        }
    }
    /// Prepares a request that fetches a single assignment operation by name.
    pub fn get(
        &self,
        resource_scope: impl Into<String>,
        assignment_name: impl Into<String>,
        assignment_operation_name: impl Into<String>,
    ) -> get::Builder {
        get::Builder {
            client: self.0.clone(),
            resource_scope: resource_scope.into(),
            assignment_name: assignment_name.into(),
            assignment_operation_name: assignment_operation_name.into(),
        }
    }
}
/// GET all operations for a blueprint assignment.
pub mod list {
    use super::models;
    /// Failure modes of this operation; any non-200 status is surfaced as
    /// `DefaultResponse` with the service's `CloudError` body.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::error::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::error::Error),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Captured request parameters; call `into_future` to execute.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_scope: String,
        pub(crate) assignment_name: String,
    }
    impl Builder {
        /// Sends `GET .../blueprintAssignments/{assignment_name}/assignmentOperations`
        /// and deserializes a 200 response into `models::AssignmentOperationList`.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::AssignmentOperationList, Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/{}/providers/Microsoft.Blueprint/blueprintAssignments/{}/assignmentOperations",
                    self.client.endpoint(),
                    &self.resource_scope,
                    &self.assignment_name
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::GET);
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                let req_body = azure_core::EMPTY_BODY;
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // Response headers are deconstructed but unused by this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::AssignmentOperationList =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Ok(rsp_value)
                    }
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::CloudError =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Err(Error::DefaultResponse {
                            status_code,
                            value: rsp_value,
                        })
                    }
                }
            })
        }
    }
}
/// GET a single assignment operation by name.
pub mod get {
    use super::models;
    /// Failure modes of this operation; any non-200 status is surfaced as
    /// `DefaultResponse` with the service's `CloudError` body.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        #[error("Failed to parse request URL")]
        ParseUrl(#[source] url::ParseError),
        #[error("Failed to build request")]
        BuildRequest(#[source] http::Error),
        #[error("Failed to serialize request body")]
        Serialize(#[source] serde_json::Error),
        #[error("Failed to get access token")]
        GetToken(#[source] azure_core::Error),
        #[error("Failed to execute request")]
        SendRequest(#[source] azure_core::error::Error),
        #[error("Failed to get response bytes")]
        ResponseBytes(#[source] azure_core::error::Error),
        #[error("Failed to deserialize response, body: {1:?}")]
        Deserialize(#[source] serde_json::Error, bytes::Bytes),
    }
    /// Captured request parameters; call `into_future` to execute.
    #[derive(Clone)]
    pub struct Builder {
        pub(crate) client: super::super::Client,
        pub(crate) resource_scope: String,
        pub(crate) assignment_name: String,
        pub(crate) assignment_operation_name: String,
    }
    impl Builder {
        /// Sends `GET .../blueprintAssignments/{assignment_name}/assignmentOperations/{assignment_operation_name}`
        /// and deserializes a 200 response into `models::AssignmentOperation`.
        pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::AssignmentOperation, Error>> {
            Box::pin(async move {
                let url_str = &format!(
                    "{}/{}/providers/Microsoft.Blueprint/blueprintAssignments/{}/assignmentOperations/{}",
                    self.client.endpoint(),
                    &self.resource_scope,
                    &self.assignment_name,
                    &self.assignment_operation_name
                );
                let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
                let mut req_builder = http::request::Builder::new();
                req_builder = req_builder.method(http::Method::GET);
                let credential = self.client.token_credential();
                let token_response = credential
                    .get_token(&self.client.scopes().join(" "))
                    .await
                    .map_err(Error::GetToken)?;
                req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                url.query_pairs_mut().append_pair("api-version", "2018-11-01-preview");
                let req_body = azure_core::EMPTY_BODY;
                req_builder = req_builder.uri(url.as_str());
                let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
                let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                // Response headers are deconstructed but unused by this operation.
                let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
                match rsp_status {
                    http::StatusCode::OK => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::AssignmentOperation =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Ok(rsp_value)
                    }
                    status_code => {
                        let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
                        let rsp_value: models::CloudError =
                            serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
                        Err(Error::DefaultResponse {
                            status_code,
                            value: rsp_value,
                        })
                    }
                }
            })
        }
    }
}
}
| 50.39391 | 140 | 0.51228 |
7af8826ac3efc03c952f1c4adb04fcd2cd0c16d4 | 3,563 | use crate::event::Event;
use std::{
sync::{mpsc, mpsc::Receiver},
thread::JoinHandle,
};
/// Number of events expected from the event source; `source_events` stops
/// reading once this many have been received and warns if the stream ends
/// before reaching it.
const EXPECTED_EVENT_COUNT: usize = 1_000_000;
/// Processes the stream of source events. Orders events by seq. Expects all events with consecutive seqs starting from 1 to be received.
/// Processing would block if there's a gap in event seqs (e.g. an event was lost). If necessary a
/// timeout might be introduced to allow skipping lost messages.
pub fn source_events() -> (Receiver<Event>, JoinHandle<()>) {
    use crate::parse_message;
    use std::{
        cmp::Reverse, collections::BinaryHeap, io::BufRead, io::BufReader, net::TcpListener,
        str::FromStr, thread,
    };

    // The event source is expected to connect to this fixed local port.
    let source_listener = TcpListener::bind("127.0.0.1:9999").expect("source listener failed");
    log::info!(
        "Started source listener on {}",
        source_listener
            .local_addr()
            .expect("failed to get source server address")
    );
    let (sender, receiver) = mpsc::channel();
    let handle = thread::spawn(move || {
        log::debug!("Source acceptor started");
        // Only a single source connection is accepted.
        let (source_stream, _addr) = source_listener.accept().expect("source connection failed");
        let mut source_reader = BufReader::new(source_stream);
        // TODO pass to user acceptor to wait until all expected users connect?
        // The first message on the stream is the user count; it is only logged here.
        let total_users: usize =
            parse_message(&mut source_reader).expect("failed to read user number");
        log::info!("Total number of users expected {}", total_users);
        // Min-heap (max-heap of `Reverse`) used to reorder out-of-order
        // events by their seq.
        let mut event_queue = BinaryHeap::new();
        let mut event_count = 0;
        // Seq of the next event to forward; events are released strictly in order.
        let mut next_seq = 1;
        for event_line in source_reader.lines() {
            let event_line = event_line.expect("failed to read event from source");
            let event = Event::from_str(&event_line).expect("failed to parse event");
            log::trace!("Buffering event received from source {:?}", event);
            event_queue.push(Reverse(event));
            // Drain the heap while its smallest seq is the expected one, so a
            // gap blocks forwarding until the missing event arrives.
            while event_queue
                .peek()
                .map(|Reverse(e)| e.seq == next_seq)
                .unwrap_or(false)
            {
                let Reverse(event) = event_queue.pop().unwrap();
                log::trace!("Forwarding sourced event {:?}", event);
                sender.send(event).expect("failed to send sourced event");
                next_seq += 1;
            }
            event_count += 1;
            if event_count == EXPECTED_EVENT_COUNT {
                log::info!("Processed all ({}) events", EXPECTED_EVENT_COUNT);
                break;
            }
        }
        // Hitting EOF before the expected count means the source disconnected
        // early or events were lost.
        if event_count != EXPECTED_EVENT_COUNT {
            log::warn!(
                "Expected {} events, only {} received",
                EXPECTED_EVENT_COUNT,
                event_count
            );
        }
    });
    (receiver, handle)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::Result;

    /// Feeds four out-of-order events into the source listener and checks
    /// that they come out of the channel ordered by seq (1 through 4).
    #[test]
    fn route_events() -> Result<()> {
        use std::{io::Write, net::TcpStream};

        let (receiver, _join_handle) = source_events();
        let mut source_stream = TcpStream::connect("127.0.0.1:9999")?;
        write!(
            &mut source_stream,
            "10\n2/U/2/3\n1/B/2/9\n3/F/2/3\n4/S/2/hi\n"
        )?;
        for expected_seq in 1..=4 {
            let event = receiver.recv().ok().unwrap();
            assert_eq!(expected_seq, event.seq);
        }
        Ok(())
    }
}
| 37.114583 | 133 | 0.579006 |
3855b1618d8f497e6de3ce481c040e6c129f6a0a | 6,925 | use crate::Config;
use crate::TermBuffer;
use crossterm::{
self as ct,
event::{self, Event, KeyCode, KeyEvent, KeyModifiers},
style::{style, Color},
};
/// Interactive terminal prompt that collects an optional "scope" string for
/// an already-chosen commit type (rendered as `type(scope):`).
#[derive(Debug)]
pub struct ScopePrompt<'a> {
    config: &'a Config,
    /// Characters typed so far (restricted to lowercased ASCII by `run`).
    input: String,
    // NOTE(review): appears to be unused by `run`; confirm before removing.
    selected_index: u16,
    /// The previously chosen commit type, e.g. "feat".
    ty: &'a str,
    /// Cursor position within `input`, counted in characters.
    x_offset: u16,
    /// Set when Enter is pressed; triggers the final render and return.
    finished: bool,
}
/// Outcome of running the scope prompt.
pub enum ScopePromptResult {
    /// User confirmed with Enter. The `Option<String>` is `None` when the
    /// scope was left empty; the `usize` is the value returned by
    /// `TermBuffer::forget` (presumably the rendered row count — confirm).
    Scope(Option<String>, usize),
    /// User pressed Escape.
    Escape,
    /// User pressed Ctrl-C.
    Terminate,
}
impl<'a> ScopePrompt<'a> {
    /// Creates a prompt for entering the scope of the already-chosen commit
    /// type `ty`, with empty input and the cursor at position 0.
    pub fn new(config: &'a Config, ty: &'a str) -> Self {
        ScopePrompt {
            config,
            input: Default::default(),
            selected_index: 0,
            ty,
            x_offset: 0,
            finished: false,
        }
    }

    /// Runs the interactive event loop until the user confirms (Enter),
    /// cancels (Esc) or aborts (Ctrl-C).
    ///
    /// The prompt is drawn with the configured figlet font while the rendered
    /// line fits the terminal width and falls back to a plain single-line
    /// display otherwise.
    pub fn run(mut self) -> ScopePromptResult {
        let mut buffer = TermBuffer::new();

        let figlet = self
            .config
            .get_figlet()
            .expect("Ensure figlet_file points to a valid file, or remove it.");

        // Render once before blocking on the first input event.
        let mut first_iteration = true;
        loop {
            let event = if first_iteration {
                first_iteration = false;
                None
            } else {
                match event::read() {
                    Ok(Event::Key(KeyEvent { code, modifiers })) => Some((
                        code,
                        modifiers.contains(KeyModifiers::CONTROL),
                        modifiers.contains(KeyModifiers::SHIFT),
                        modifiers.contains(KeyModifiers::ALT),
                    )),
                    _ => continue,
                }
            };

            match event {
                Some((KeyCode::Char('c'), true, false, false)) => {
                    return ScopePromptResult::Terminate;
                }
                Some((KeyCode::Enter, false, false, false)) => {
                    self.finished = true;
                }
                Some((KeyCode::Char(c), false, _, false)) => {
                    // Only characters valid in a scope are accepted; letters
                    // are lowercased on insert.
                    let accept =
                        c.is_ascii_alphanumeric() || matches!(c, '_' | '-' | '/' | ',' | '|');
                    if accept {
                        self.x_offset += 1;
                        self.input
                            .insert(self.x_offset as usize - 1, c.to_ascii_lowercase());
                    }
                }
                Some((KeyCode::Left, false, _, false)) => {
                    self.x_offset = self.x_offset.saturating_sub(1);
                }
                Some((KeyCode::Right, false, _, false)) => {
                    if (self.x_offset as usize) < self.input.len() {
                        self.x_offset += 1;
                    }
                }
                Some((KeyCode::Backspace, false, _, false)) => {
                    // Delete the character immediately before the cursor.
                    // Guarding on `x_offset > 0` fixes two defects in the old
                    // code: an integer-underflow panic when backspacing with
                    // the cursor at the start of the input, and deleting the
                    // *last* character (instead of the one before the cursor)
                    // when the cursor sat just before the end. `input` is
                    // ASCII-only (see the accept filter above), so byte
                    // indexing into the String is char-boundary safe.
                    if self.x_offset > 0 {
                        self.input.remove(self.x_offset as usize - 1);
                        self.x_offset -= 1;
                    }
                }
                Some((KeyCode::Esc, false, _, false)) => {
                    return ScopePromptResult::Escape;
                }
                None => {}
                _ => continue,
            };

            let (term_width, _) = ct::terminal::size().expect("get terminal size");

            let mut lines = figlet.create_vec();
            let mut cursor_x = 0;
            cursor_x += figlet.write_to_buf_color(&self.ty, &mut lines[..], |s| {
                style(s).with(Color::Blue).to_string()
            });

            // Parentheses are hidden on the final render when the scope was
            // left empty, so the finished line reads `type:` not `type():`.
            let show_parens = !self.finished || !self.input.is_empty();
            if show_parens {
                cursor_x += figlet.write_to_buf_color("(", &mut lines[..], |s| {
                    style(s).with(Color::Grey).to_string()
                });
            }

            let offset = self.x_offset as usize;
            cursor_x +=
                figlet.write_to_buf_color(&(self.input.as_str())[0..offset], &mut lines[..], |s| {
                    style(s).with(Color::Green).to_string()
                });

            let mut fig_width = cursor_x;

            // Insert the indicator for where input will be placed. It counts
            // toward the measured width but not toward `cursor_x`, so the
            // terminal cursor lands on the indicator itself.
            if !self.finished {
                fig_width += figlet.write_to_buf_color("-", &mut lines[..], |s| {
                    style(s).with(Color::Grey).to_string()
                });
            }

            fig_width +=
                figlet.write_to_buf_color(&(self.input.as_str())[offset..], &mut lines[..], |s| {
                    style(s).with(Color::Green).to_string()
                });

            if show_parens {
                fig_width += figlet.write_to_buf_color(")", &mut lines[..], |s| {
                    style(s).with(Color::Grey).to_string()
                });
            }
            fig_width += figlet.write_to_buf_color(":", &mut lines[..], |s| {
                style(s).with(Color::Grey).to_string()
            });

            // We're tracking the printed width above to see if we've run out of space here.
            let figlet_overflows = fig_width + 1 > term_width as usize;
            let cursor_y = if figlet_overflows { 1 } else { 3 };

            // If the figlet rendering doesn't fit, fall back to a plain
            // single-line prompt with one blank line of padding above/below.
            // NOTE(review): this path only renders the input up to the cursor
            // (characters after the cursor are not displayed) — confirm
            // whether that is intentional.
            if figlet_overflows {
                use std::fmt::Write;
                lines = vec!["".into(), "".into(), "".into()];
                let line = &mut lines[1];
                write!(line, "{}", style(&self.ty).with(Color::Blue)).unwrap();
                write!(line, "{}", style("(").with(Color::Grey)).unwrap();
                write!(
                    line,
                    "{}",
                    style(&(self.input.as_str())[0..offset]).with(Color::Green)
                )
                .unwrap();
                if !self.finished {
                    write!(line, "{}", style("_").with(Color::Grey)).unwrap();
                }
                write!(line, "{}", style(")").with(Color::Grey)).unwrap();
                cursor_x = self.ty.len() + 1 + self.input.len();
            }

            for line in lines {
                buffer.push_line(line);
            }

            buffer.set_next_cursor((cursor_x as u16, cursor_y));
            buffer.render_frame();
            buffer.flush();

            if self.finished {
                let rows = buffer.forget();
                return ScopePromptResult::Scope(Some(self.input).filter(|s| !s.is_empty()), rows);
            }
        }
    }
}
| 33.946078 | 122 | 0.42426 |
f8fe61469fc070ece9891f3aad64b3d093819cab | 18,445 | /*
Copyright Michael Lodder. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
/// Curve25519 is not a prime order curve
/// Since this crate relies on the ff::PrimeField
/// and Curve25519 does work with secret sharing schemes
/// This code wraps the Ristretto points and scalars in a facade
/// to be compliant to work with this library.
/// The intent is the consumer will not have to use these directly since
/// the wrappers implement the [`From`] and [`Into`] traits.
use core::{
borrow::Borrow,
iter::Sum,
ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign},
};
use curve25519_dalek::{
constants::{ED25519_BASEPOINT_POINT, RISTRETTO_BASEPOINT_POINT},
edwards::{CompressedEdwardsY, EdwardsPoint},
ristretto::{CompressedRistretto, RistrettoPoint},
scalar::Scalar,
traits::{Identity, IsIdentity},
};
use ff::{Field, PrimeField};
use group::{Group, GroupEncoding};
use rand_chacha::ChaChaRng;
use rand_core::{RngCore, SeedableRng};
use subtle::{Choice, ConditionallySelectable, CtOption};
/// Wraps a ristretto25519 point so it can implement the `group` traits this
/// crate's secret-sharing code relies on. Converts to/from
/// `RistrettoPoint` via the `From` impls below.
#[derive(Copy, Clone, Debug, Eq)]
pub struct WrappedRistretto(pub RistrettoPoint);
impl Group for WrappedRistretto {
    type Scalar = WrappedScalar;

    /// Samples a random point. The caller's `RngCore` is used to seed a
    /// ChaCha RNG which is then handed to `RistrettoPoint::random` —
    /// presumably to bridge the trait's `RngCore`-only bound to dalek's
    /// stricter RNG requirement; confirm against curve25519-dalek's API.
    fn random(mut rng: impl RngCore) -> Self {
        let mut seed = [0u8; 32];
        rng.fill_bytes(&mut seed);
        let mut crng = ChaChaRng::from_seed(seed);
        Self(RistrettoPoint::random(&mut crng))
    }

    /// The group identity (neutral) element.
    fn identity() -> Self {
        Self(RistrettoPoint::identity())
    }

    /// The standard Ristretto basepoint.
    fn generator() -> Self {
        Self(RISTRETTO_BASEPOINT_POINT)
    }

    fn is_identity(&self) -> Choice {
        // `IsIdentity::is_identity` yields a bool; convert it to a `Choice`.
        Choice::from(u8::from(self.0.is_identity()))
    }

    fn double(&self) -> Self {
        Self(self.0 + self.0)
    }
}
impl<T> Sum<T> for WrappedRistretto
where
    T: Borrow<WrappedRistretto>,
{
    /// Adds every point in the iterator, starting from the group identity.
    fn sum<I: Iterator<Item = T>>(iter: I) -> Self {
        let mut total = Self::identity();
        for item in iter {
            total += *item.borrow();
        }
        total
    }
}
impl<'a> Neg for &'a WrappedRistretto {
    type Output = WrappedRistretto;

    /// Negates the wrapped point without consuming the borrow.
    #[inline]
    fn neg(self) -> Self::Output {
        WrappedRistretto(-self.0)
    }
}
impl Neg for WrappedRistretto {
    type Output = WrappedRistretto;

    /// Negates the wrapped point.
    #[inline]
    fn neg(self) -> Self::Output {
        WrappedRistretto(-self.0)
    }
}
impl PartialEq for WrappedRistretto {
    fn eq(&self, other: &Self) -> bool {
        // Delegates directly to `RistrettoPoint` equality.
        self.0 == other.0
    }
}
// Point addition in all four value/reference combinations, each delegating
// to the underlying `RistrettoPoint` addition.
impl<'a, 'b> Add<&'b WrappedRistretto> for &'a WrappedRistretto {
    type Output = WrappedRistretto;

    #[inline]
    fn add(self, rhs: &'b WrappedRistretto) -> Self::Output {
        WrappedRistretto(self.0 + rhs.0)
    }
}
impl<'b> Add<&'b WrappedRistretto> for WrappedRistretto {
    type Output = Self;

    #[inline]
    fn add(self, rhs: &'b WrappedRistretto) -> Self::Output {
        WrappedRistretto(self.0 + rhs.0)
    }
}
impl<'a> Add<WrappedRistretto> for &'a WrappedRistretto {
    type Output = WrappedRistretto;

    #[inline]
    fn add(self, rhs: WrappedRistretto) -> Self::Output {
        WrappedRistretto(self.0 + rhs.0)
    }
}
impl Add for WrappedRistretto {
    type Output = Self;

    #[inline]
    fn add(self, rhs: Self) -> Self::Output {
        WrappedRistretto(self.0 + rhs.0)
    }
}
impl AddAssign for WrappedRistretto {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.0 += rhs.0;
    }
}
impl<'b> AddAssign<&'b WrappedRistretto> for WrappedRistretto {
    #[inline]
    fn add_assign(&mut self, rhs: &'b WrappedRistretto) {
        self.0 += rhs.0;
    }
}
// Point subtraction in all four value/reference combinations, each
// delegating to the underlying `RistrettoPoint` subtraction.
impl<'a, 'b> Sub<&'b WrappedRistretto> for &'a WrappedRistretto {
    type Output = WrappedRistretto;

    #[inline]
    fn sub(self, rhs: &'b WrappedRistretto) -> Self::Output {
        WrappedRistretto(self.0 - rhs.0)
    }
}
impl<'b> Sub<&'b WrappedRistretto> for WrappedRistretto {
    type Output = Self;

    #[inline]
    fn sub(self, rhs: &'b WrappedRistretto) -> Self::Output {
        WrappedRistretto(self.0 - rhs.0)
    }
}
impl<'a> Sub<WrappedRistretto> for &'a WrappedRistretto {
    type Output = WrappedRistretto;

    #[inline]
    fn sub(self, rhs: WrappedRistretto) -> Self::Output {
        WrappedRistretto(self.0 - rhs.0)
    }
}
impl Sub for WrappedRistretto {
    type Output = Self;

    #[inline]
    fn sub(self, rhs: Self) -> Self::Output {
        WrappedRistretto(self.0 - rhs.0)
    }
}
impl SubAssign for WrappedRistretto {
    #[inline]
    fn sub_assign(&mut self, rhs: Self) {
        self.0 -= rhs.0;
    }
}
impl<'b> SubAssign<&'b WrappedRistretto> for WrappedRistretto {
    #[inline]
    fn sub_assign(&mut self, rhs: &'b WrappedRistretto) {
        self.0 -= rhs.0;
    }
}
impl<'a, 'b> Mul<&'b WrappedScalar> for &'a WrappedRistretto {
type Output = WrappedRistretto;
#[inline]
fn mul(self, rhs: &'b WrappedScalar) -> Self::Output {
*self * *rhs
}
}
impl<'b> Mul<&'b WrappedScalar> for WrappedRistretto {
type Output = Self;
#[inline]
fn mul(self, rhs: &'b WrappedScalar) -> Self::Output {
self * *rhs
}
}
impl<'a> Mul<WrappedScalar> for &'a WrappedRistretto {
type Output = WrappedRistretto;
#[inline]
fn mul(self, rhs: WrappedScalar) -> Self::Output {
*self * rhs
}
}
impl Mul<WrappedScalar> for WrappedRistretto {
type Output = Self;
#[inline]
fn mul(self, rhs: WrappedScalar) -> Self::Output {
WrappedRistretto(self.0 * rhs.0)
}
}
impl MulAssign<WrappedScalar> for WrappedRistretto {
#[inline]
fn mul_assign(&mut self, rhs: WrappedScalar) {
*self = *self * rhs;
}
}
impl<'b> MulAssign<&'b WrappedScalar> for WrappedRistretto {
#[inline]
fn mul_assign(&mut self, rhs: &'b WrappedScalar) {
*self = *self * *rhs;
}
}
impl GroupEncoding for WrappedRistretto {
    type Repr = [u8; 32];
    // Decompresses the canonical 32-byte Ristretto encoding. On failure the
    // returned `CtOption` carries the identity point with choice 0, so the
    // caller must check the flag before using the value.
    fn from_bytes(bytes: &Self::Repr) -> CtOption<Self> {
        let p = CompressedRistretto(*bytes);
        match p.decompress() {
            None => CtOption::new(Self(RistrettoPoint::identity()), Choice::from(0u8)),
            Some(rp) => CtOption::new(Self(rp), Choice::from(1u8)),
        }
    }
    // Not actually "unchecked": delegates to the validating decompression
    // path above, since Ristretto has no cheaper decoding.
    fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption<Self> {
        Self::from_bytes(bytes)
    }
    fn to_bytes(&self) -> Self::Repr {
        self.0.compress().0
    }
}
// Default is the group identity, matching `Group::identity`.
impl Default for WrappedRistretto {
    fn default() -> Self {
        Self(RistrettoPoint::identity())
    }
}
// Lossless conversions between the wrapper and the inner point.
impl From<WrappedRistretto> for RistrettoPoint {
    fn from(p: WrappedRistretto) -> RistrettoPoint {
        p.0
    }
}
impl From<RistrettoPoint> for WrappedRistretto {
    fn from(p: RistrettoPoint) -> Self {
        Self(p)
    }
}
/// Wraps an ed25519 point
#[derive(Copy, Clone, Debug, Eq)]
pub struct WrappedEdwards(pub EdwardsPoint);
impl Group for WrappedEdwards {
    type Scalar = WrappedScalar;
    // Draws 32 bytes from the caller's RNG and hashes them to a point
    // (hash-to-curve via SHA-512), so the result is a uniformly random
    // group element rather than a random scalar multiple.
    fn random(mut rng: impl RngCore) -> Self {
        let mut seed = [0u8; 32];
        rng.fill_bytes(&mut seed);
        Self(EdwardsPoint::hash_from_bytes::<sha2::Sha512>(&seed))
    }
    fn identity() -> Self {
        Self(EdwardsPoint::identity())
    }
    fn generator() -> Self {
        Self(ED25519_BASEPOINT_POINT)
    }
    fn is_identity(&self) -> Choice {
        Choice::from(u8::from(self.0.is_identity()))
    }
    // Doubling implemented as self-addition.
    fn double(&self) -> Self {
        Self(self.0 + self.0)
    }
}
// Sums any iterator of (borrowed or owned) points, starting from identity.
impl<T> Sum<T> for WrappedEdwards
where
    T: Borrow<WrappedEdwards>,
{
    fn sum<I: Iterator<Item = T>>(iter: I) -> Self {
        iter.fold(Self::identity(), |acc, item| acc + item.borrow())
    }
}
impl<'a> Neg for &'a WrappedEdwards {
    type Output = WrappedEdwards;
    #[inline]
    fn neg(self) -> Self::Output {
        WrappedEdwards(self.0.neg())
    }
}
// Owned negation defers to the by-reference impl above.
impl Neg for WrappedEdwards {
    type Output = WrappedEdwards;
    #[inline]
    fn neg(self) -> Self::Output {
        -&self
    }
}
// Equality delegates to the inner `EdwardsPoint`'s `==`.
impl PartialEq for WrappedEdwards {
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}
// Arithmetic boilerplate, mirroring the `WrappedRistretto` impls above:
// all reference/owned combinations funnel into the fully-owned impl that
// operates on the inner `EdwardsPoint` (and `Scalar` for multiplication).
impl<'a, 'b> Add<&'b WrappedEdwards> for &'a WrappedEdwards {
    type Output = WrappedEdwards;
    #[inline]
    fn add(self, rhs: &'b WrappedEdwards) -> Self::Output {
        *self + *rhs
    }
}
impl<'b> Add<&'b WrappedEdwards> for WrappedEdwards {
    type Output = Self;
    #[inline]
    fn add(self, rhs: &'b WrappedEdwards) -> Self::Output {
        self + *rhs
    }
}
impl<'a> Add<WrappedEdwards> for &'a WrappedEdwards {
    type Output = WrappedEdwards;
    #[inline]
    fn add(self, rhs: WrappedEdwards) -> Self::Output {
        *self + rhs
    }
}
impl Add for WrappedEdwards {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self::Output {
        WrappedEdwards(self.0 + rhs.0)
    }
}
impl AddAssign for WrappedEdwards {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        *self = *self + rhs;
    }
}
impl<'b> AddAssign<&'b WrappedEdwards> for WrappedEdwards {
    #[inline]
    fn add_assign(&mut self, rhs: &'b WrappedEdwards) {
        *self = *self + *rhs;
    }
}
impl<'a, 'b> Sub<&'b WrappedEdwards> for &'a WrappedEdwards {
    type Output = WrappedEdwards;
    #[inline]
    fn sub(self, rhs: &'b WrappedEdwards) -> Self::Output {
        *self - *rhs
    }
}
impl<'b> Sub<&'b WrappedEdwards> for WrappedEdwards {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: &'b WrappedEdwards) -> Self::Output {
        self - *rhs
    }
}
impl<'a> Sub<WrappedEdwards> for &'a WrappedEdwards {
    type Output = WrappedEdwards;
    #[inline]
    fn sub(self, rhs: WrappedEdwards) -> Self::Output {
        *self - rhs
    }
}
impl Sub for WrappedEdwards {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self::Output {
        WrappedEdwards(self.0 - rhs.0)
    }
}
impl SubAssign for WrappedEdwards {
    #[inline]
    fn sub_assign(&mut self, rhs: Self) {
        *self = *self - rhs;
    }
}
impl<'b> SubAssign<&'b WrappedEdwards> for WrappedEdwards {
    #[inline]
    fn sub_assign(&mut self, rhs: &'b WrappedEdwards) {
        *self = *self - *rhs;
    }
}
impl<'a, 'b> Mul<&'b WrappedScalar> for &'a WrappedEdwards {
    type Output = WrappedEdwards;
    #[inline]
    fn mul(self, rhs: &'b WrappedScalar) -> Self::Output {
        *self * *rhs
    }
}
impl<'b> Mul<&'b WrappedScalar> for WrappedEdwards {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: &'b WrappedScalar) -> Self::Output {
        self * *rhs
    }
}
impl<'a> Mul<WrappedScalar> for &'a WrappedEdwards {
    type Output = WrappedEdwards;
    #[inline]
    fn mul(self, rhs: WrappedScalar) -> Self::Output {
        *self * rhs
    }
}
impl Mul<WrappedScalar> for WrappedEdwards {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: WrappedScalar) -> Self::Output {
        WrappedEdwards(self.0 * rhs.0)
    }
}
impl MulAssign<WrappedScalar> for WrappedEdwards {
    #[inline]
    fn mul_assign(&mut self, rhs: WrappedScalar) {
        *self = *self * rhs;
    }
}
impl<'b> MulAssign<&'b WrappedScalar> for WrappedEdwards {
    #[inline]
    fn mul_assign(&mut self, rhs: &'b WrappedScalar) {
        *self = *self * *rhs;
    }
}
impl GroupEncoding for WrappedEdwards {
    type Repr = [u8; 32];
    // Decompresses the 32-byte compressed-Y encoding. On failure the
    // `CtOption` carries the identity point with choice 0; callers must
    // check the flag before trusting the value.
    fn from_bytes(bytes: &Self::Repr) -> CtOption<Self> {
        let p = CompressedEdwardsY(*bytes);
        match p.decompress() {
            None => CtOption::new(Self(EdwardsPoint::identity()), Choice::from(0u8)),
            Some(rp) => CtOption::new(Self(rp), Choice::from(1u8)),
        }
    }
    // Not actually "unchecked": delegates to the validating path above.
    fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption<Self> {
        Self::from_bytes(bytes)
    }
    fn to_bytes(&self) -> Self::Repr {
        self.0.compress().0
    }
}
// Default is the group identity, matching `Group::identity`.
impl Default for WrappedEdwards {
    fn default() -> Self {
        Self(EdwardsPoint::identity())
    }
}
// Lossless conversions between the wrapper and the inner point.
impl From<WrappedEdwards> for EdwardsPoint {
    fn from(p: WrappedEdwards) -> EdwardsPoint {
        p.0
    }
}
impl From<EdwardsPoint> for WrappedEdwards {
    fn from(p: EdwardsPoint) -> Self {
        Self(p)
    }
}
// Converts a Ristretto point to a torsion-free Edwards point by peeking at
// the Ristretto type's internal Edwards representative and clearing the
// cofactor (see the test `ristretto_to_edwards` below).
impl From<WrappedRistretto> for WrappedEdwards {
    fn from(p: WrappedRistretto) -> Self {
        struct Ed25519(EdwardsPoint);
        // can't just return the inner underlying point, since it may not be of order 8.
        // compute [8^{-1}][8]P to clear any cofactor
        // this is the byte representation of 8^{-1} mod q
        let eight_inv = Scalar::from_canonical_bytes([
            121, 47, 220, 226, 41, 229, 6, 97, 208, 218, 28, 125, 179, 157, 211, 7, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
        ])
        .unwrap();
        // SAFETY(review): this transmute assumes `RistrettoPoint` is a plain
        // newtype over `EdwardsPoint` with identical layout in the pinned
        // curve25519-dalek version — TODO confirm this still holds whenever
        // the dependency is bumped, since it is not a documented guarantee.
        let r = unsafe { core::mem::transmute::<RistrettoPoint, Ed25519>(p.0) };
        WrappedEdwards(r.0.mul_by_cofactor() * eight_inv)
    }
}
/// Wraps a curve25519 scalar
#[derive(Copy, Clone, Debug, Eq)]
pub struct WrappedScalar(pub Scalar);
impl Field for WrappedScalar {
    /// Samples a uniformly random scalar. The caller's RNG supplies a
    /// 32-byte seed for a ChaCha stream, which `Scalar::random` then
    /// draws from.
    fn random(mut rng: impl RngCore) -> Self {
        let mut seed = [0u8; 32];
        rng.fill_bytes(&mut seed);
        let mut crng = ChaChaRng::from_seed(seed);
        Self(Scalar::random(&mut crng))
    }
    fn zero() -> Self {
        Self(Scalar::zero())
    }
    fn one() -> Self {
        Self(Scalar::one())
    }
    fn is_zero(&self) -> bool {
        self.0 == Scalar::zero()
    }
    fn square(&self) -> Self {
        Self(self.0 * self.0)
    }
    fn double(&self) -> Self {
        Self(self.0 + self.0)
    }
    /// Multiplicative inverse.
    ///
    /// Per the `Field` contract, inverting zero must fail. The previous
    /// implementation unconditionally reported success; the success flag
    /// now reflects whether `self` is nonzero (nonzero inputs behave
    /// exactly as before).
    fn invert(&self) -> CtOption<Self> {
        let is_nonzero = Choice::from((!self.is_zero()) as u8);
        CtOption::new(Self(self.0.invert()), is_nonzero)
    }
    fn sqrt(&self) -> CtOption<Self> {
        // Not used for secret sharing
        unimplemented!()
    }
}
impl PrimeField for WrappedScalar {
    // Little-endian 32-byte representation.
    type Repr = [u8; 32];
    // NOTE(review): `Scalar::from_bits` does not reject non-canonical
    // (>= group order) encodings, so this never returns `None` — confirm
    // callers only feed canonical representations.
    fn from_repr(bytes: Self::Repr) -> Option<Self> {
        Some(Self(Scalar::from_bits(bytes)))
    }
    fn to_repr(&self) -> Self::Repr {
        self.0.to_bytes()
    }
    // Parity is the lowest bit of the little-endian encoding.
    fn is_odd(&self) -> bool {
        self.0[0] & 1 == 1
    }
    const NUM_BITS: u32 = 255;
    const CAPACITY: u32 = Self::NUM_BITS - 1;
    // Not required by the secret-sharing use case; calling this panics.
    fn multiplicative_generator() -> Self {
        unimplemented!();
    }
    // NOTE(review): `S` is a placeholder here — `root_of_unity` below is
    // unimplemented, so this constant is never meaningfully used.
    const S: u32 = 32;
    // Not required by the secret-sharing use case; calling this panics.
    fn root_of_unity() -> Self {
        unimplemented!();
    }
}
impl From<u64> for WrappedScalar {
    fn from(d: u64) -> WrappedScalar {
        Self(Scalar::from(d))
    }
}
// Constant-time selection, delegated to the inner scalar.
impl ConditionallySelectable for WrappedScalar {
    fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
        Self(Scalar::conditional_select(&a.0, &b.0, choice))
    }
}
// Equality delegates to the inner `Scalar`'s `==`.
impl PartialEq for WrappedScalar {
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}
// Default is the zero scalar (`Scalar::default()`).
impl Default for WrappedScalar {
    fn default() -> Self {
        Self(Scalar::default())
    }
}
// Arithmetic boilerplate for the scalar wrapper. As with the point types,
// the reference/owned combinations funnel into the fully-owned impls.
// (The `*Assign<&'b ...>` impls deliberately pass `rhs` by reference,
// relying on the `Op<&'b WrappedScalar> for WrappedScalar` impls.)
impl<'a, 'b> Add<&'b WrappedScalar> for &'a WrappedScalar {
    type Output = WrappedScalar;
    #[inline]
    fn add(self, rhs: &'b WrappedScalar) -> Self::Output {
        *self + *rhs
    }
}
impl<'b> Add<&'b WrappedScalar> for WrappedScalar {
    type Output = Self;
    #[inline]
    fn add(self, rhs: &'b WrappedScalar) -> Self::Output {
        self + *rhs
    }
}
impl<'a> Add<WrappedScalar> for &'a WrappedScalar {
    type Output = WrappedScalar;
    #[inline]
    fn add(self, rhs: WrappedScalar) -> Self::Output {
        *self + rhs
    }
}
impl Add for WrappedScalar {
    type Output = Self;
    #[inline]
    fn add(self, rhs: WrappedScalar) -> Self::Output {
        WrappedScalar(self.0 + rhs.0)
    }
}
impl AddAssign for WrappedScalar {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        *self = *self + rhs;
    }
}
impl<'b> AddAssign<&'b WrappedScalar> for WrappedScalar {
    #[inline]
    fn add_assign(&mut self, rhs: &'b WrappedScalar) {
        *self = *self + rhs;
    }
}
impl<'a, 'b> Sub<&'b WrappedScalar> for &'a WrappedScalar {
    type Output = WrappedScalar;
    #[inline]
    fn sub(self, rhs: &'b WrappedScalar) -> Self::Output {
        *self - *rhs
    }
}
impl<'b> Sub<&'b WrappedScalar> for WrappedScalar {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: &'b WrappedScalar) -> Self::Output {
        self - *rhs
    }
}
impl<'a> Sub<WrappedScalar> for &'a WrappedScalar {
    type Output = WrappedScalar;
    #[inline]
    fn sub(self, rhs: WrappedScalar) -> Self::Output {
        *self - rhs
    }
}
impl Sub for WrappedScalar {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: WrappedScalar) -> Self::Output {
        WrappedScalar(self.0 - rhs.0)
    }
}
impl SubAssign for WrappedScalar {
    #[inline]
    fn sub_assign(&mut self, rhs: Self) {
        *self = *self - rhs;
    }
}
impl<'b> SubAssign<&'b WrappedScalar> for WrappedScalar {
    #[inline]
    fn sub_assign(&mut self, rhs: &'b WrappedScalar) {
        *self = *self - rhs;
    }
}
impl<'a, 'b> Mul<&'b WrappedScalar> for &'a WrappedScalar {
    type Output = WrappedScalar;
    #[inline]
    fn mul(self, rhs: &'b WrappedScalar) -> Self::Output {
        *self * *rhs
    }
}
impl<'b> Mul<&'b WrappedScalar> for WrappedScalar {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: &'b WrappedScalar) -> Self::Output {
        self * *rhs
    }
}
impl<'a> Mul<WrappedScalar> for &'a WrappedScalar {
    type Output = WrappedScalar;
    #[inline]
    fn mul(self, rhs: WrappedScalar) -> Self::Output {
        *self * rhs
    }
}
impl Mul for WrappedScalar {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: WrappedScalar) -> Self::Output {
        WrappedScalar(self.0 * rhs.0)
    }
}
impl MulAssign for WrappedScalar {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        *self = *self * rhs;
    }
}
impl<'b> MulAssign<&'b WrappedScalar> for WrappedScalar {
    #[inline]
    fn mul_assign(&mut self, rhs: &'b WrappedScalar) {
        *self = *self * rhs;
    }
}
impl<'a> Neg for &'a WrappedScalar {
    type Output = WrappedScalar;
    #[inline]
    fn neg(self) -> Self::Output {
        WrappedScalar(self.0.neg())
    }
}
// Owned negation defers to the by-reference impl above.
impl Neg for WrappedScalar {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self::Output {
        -&self
    }
}
// Lossless conversions between the wrapper and the inner scalar.
impl From<WrappedScalar> for Scalar {
    fn from(s: WrappedScalar) -> Scalar {
        s.0
    }
}
impl From<Scalar> for WrappedScalar {
    fn from(s: Scalar) -> WrappedScalar {
        Self(s)
    }
}
// Zeroizing a scalar resets it to `Default` (zero).
impl zeroize::DefaultIsZeroes for WrappedScalar {}
// Sanity check for `From<WrappedRistretto> for WrappedEdwards`: the
// converted point must be torsion-free (in the prime-order subgroup),
// otherwise the cofactor-clearing step above is broken.
#[test]
fn ristretto_to_edwards() {
    let mut osrng = rand::rngs::OsRng::default();
    let sk = Scalar::random(&mut osrng);
    let pk = RISTRETTO_BASEPOINT_POINT * sk;
    let ek = WrappedEdwards::from(WrappedRistretto(pk));
    assert!(ek.0.is_torsion_free());
}
| 22.037037 | 98 | 0.591217 |
4bf24293773e6fb37184ba8398b2fd9eb85ec50f | 150 | pub mod category;
// Crate module tree — presumably one module per domain entity
// (client/product/request/...); verify against the files under src/.
pub mod client;
pub mod product;
pub mod product_category;
pub mod request;
pub mod request_product;
pub mod support;
pub mod user;
| 16.666667 | 25 | 0.786667 |
f5fe00eb532966a055ebaea6e58df1b2182bf82b | 26,261 | use crate::*;
/// Describes one effect pass to evaluate a scene.
#[derive(Clone, Debug)]
pub struct Render {
    /// Refers to a node that contains a camera describing the viewpoint
    /// from which to render this compositing step.
    pub camera_node: UrlRef<Node>,
    /// Specifies which layer or layers to render in this compositing step
    /// while evaluating the scene.
    pub layers: Vec<String>,
    /// Instantiates a COLLADA material resource. See [`InstanceEffectData`]
    /// for the additional instance effect data.
    pub instance_effect: Option<Instance<Effect>>,
}
impl Render {
    /// Construct a new render pass.
    pub fn new(camera_node: Url, layers: Vec<String>, instance_effect: Url) -> Self {
        Self {
            camera_node: Ref::new(camera_node),
            layers,
            instance_effect: Some(Instance::new(instance_effect)),
        }
    }
}
impl XNode for Render {
    const NAME: &'static str = "render";
    /// Parses a `<render>` element: the `camera_node` attribute is mandatory,
    /// followed by zero or more `<layer>` children and an optional
    /// `<instance_effect>`.
    fn parse(element: &Element) -> Result<Self> {
        debug_assert_eq!(element.name(), Self::NAME);
        let mut it = element.children().peekable();
        let res = Render {
            camera_node: parse_attr(element.attr("camera_node"))?
                .ok_or("missing camera_node attr")?,
            layers: parse_list("layer", &mut it, parse_text)?,
            instance_effect: Instance::parse_opt(&mut it)?,
        };
        // `finish` rejects any unconsumed (unexpected) children.
        finish(res, it)
    }
}
impl XNodeWrite for Render {
    fn write_to<W: Write>(&self, w: &mut XWriter<W>) -> Result<()> {
        let mut e = Self::elem();
        e.print_attr("camera_node", &self.camera_node);
        let e = e.start(w)?;
        // One `<layer>` child per entry, in order.
        many(&self.layers, |e| ElemBuilder::print_str("layer", e, w))?;
        self.instance_effect.write_to(w)?;
        e.end(w)
    }
}
/// A shader element.
#[derive(Clone, Debug)]
pub enum Shader {
    /// Produces a specularly shaded surface with a Blinn BRDF approximation.
    Blinn(Blinn),
    /// Produces a constantly shaded surface that is independent of lighting.
    Constant(ConstantFx),
    /// Produces a diffuse shaded surface that is independent of lighting.
    Lambert(Lambert),
    /// Produces a specularly shaded surface where the specular reflection is shaded
    /// according the Phong BRDF approximation.
    Phong(Phong),
}
impl From<Blinn> for Shader {
    fn from(v: Blinn) -> Self {
        Self::Blinn(v)
    }
}
impl From<ConstantFx> for Shader {
    fn from(v: ConstantFx) -> Self {
        Self::Constant(v)
    }
}
impl From<Lambert> for Shader {
    fn from(v: Lambert) -> Self {
        Self::Lambert(v)
    }
}
impl From<Phong> for Shader {
    fn from(v: Phong) -> Self {
        Self::Phong(v)
    }
}
impl Shader {
    /// Parse a [`Shader`] from an XML element.
    ///
    /// Returns `Ok(None)` (rather than an error) when the element name is not
    /// one of the four shader kinds, so callers can try other alternatives.
    pub fn parse(e: &Element) -> Result<Option<Self>> {
        Ok(Some(match e.name() {
            Blinn::NAME => Self::Blinn(Blinn::parse(e)?),
            ConstantFx::NAME => Self::Constant(ConstantFx::parse(e)?),
            Lambert::NAME => Self::Lambert(Lambert::parse(e)?),
            Phong::NAME => Self::Phong(Phong::parse(e)?),
            _ => return Ok(None),
        }))
    }
    /// Run the function `f` on all arguments of type [`Texture`] in the parameters to this shader.
    pub fn on_textures<'a, E>(
        &'a self,
        f: &mut impl FnMut(&'a Texture) -> Result<(), E>,
    ) -> Result<(), E> {
        // Dispatch to the concrete shader's traversal.
        match self {
            Self::Blinn(s) => s.on_textures(f),
            Self::Constant(s) => s.on_textures(f),
            Self::Lambert(s) => s.on_textures(f),
            Self::Phong(s) => s.on_textures(f),
        }
    }
}
impl XNodeWrite for Shader {
    fn write_to<W: Write>(&self, w: &mut XWriter<W>) -> Result<()> {
        match self {
            Self::Blinn(e) => e.write_to(w),
            Self::Constant(e) => e.write_to(w),
            Self::Lambert(e) => e.write_to(w),
            Self::Phong(e) => e.write_to(w),
        }
    }
}
/// Produces a specularly shaded surface with a Blinn BRDF approximation.
#[derive(Clone, Default, Debug)]
pub struct Blinn {
    /// Declares the amount of light emitted from the surface of this object.
    pub emission: Option<WithSid<ColorParam>>,
    /// Declares the amount of ambient light emitted from the surface of this object.
    pub ambient: Option<WithSid<ColorParam>>,
    /// Declares the amount of light diffusely reflected from the surface of this object.
    pub diffuse: Option<WithSid<ColorParam>>,
    /// Declares the color of light specularly reflected from the surface of this object.
    pub specular: Option<WithSid<ColorParam>>,
    /// Declares the specularity or roughness of the specular reflection lobe.
    pub shininess: Option<WithSid<FloatParam>>,
    /// Declares the color of a perfect mirror reflection.
    pub reflective: Option<WithSid<ColorParam>>,
    /// Declares the amount of perfect mirror reflection to be added
    /// to the reflected light as a value between 0.0 and 1.0.
    pub reflectivity: Option<WithSid<FloatParam>>,
    /// Declares the color of perfectly refracted light.
    pub transparent: Option<WithSid<ColorParam>>,
    /// Declares the amount of perfectly refracted light added
    /// to the reflected color as a scalar value between 0.0 and 1.0.
    pub transparency: Option<WithSid<FloatParam>>,
    /// Declares the index of refraction for perfectly refracted light
    /// as a single scalar index.
    pub index_of_refraction: Option<WithSid<FloatParam>>,
}
impl XNode for Blinn {
    const NAME: &'static str = "blinn";
    /// Parses a `<blinn>` element. All children are optional but are
    /// consumed in the schema order matching the field order above.
    fn parse(element: &Element) -> Result<Self> {
        debug_assert_eq!(element.name(), Self::NAME);
        let mut it = element.children().peekable();
        Ok(Blinn {
            emission: parse_opt("emission", &mut it, WithSid::parse)?,
            ambient: parse_opt("ambient", &mut it, WithSid::parse)?,
            diffuse: parse_opt("diffuse", &mut it, WithSid::parse)?,
            specular: parse_opt("specular", &mut it, WithSid::parse)?,
            shininess: parse_opt("shininess", &mut it, WithSid::parse)?,
            reflective: parse_opt("reflective", &mut it, WithSid::parse)?,
            reflectivity: parse_opt("reflectivity", &mut it, WithSid::parse)?,
            transparent: parse_opt("transparent", &mut it, WithSid::parse)?,
            transparency: parse_opt("transparency", &mut it, WithSid::parse)?,
            index_of_refraction: parse_opt("index_of_refraction", &mut it, WithSid::parse)?,
        })
    }
}
impl XNodeWrite for Blinn {
    fn write_to<W: Write>(&self, w: &mut XWriter<W>) -> Result<()> {
        // Emit children in schema order, skipping `None` slots.
        let e = Self::elem().start(w)?;
        WithSid::write_opt(&self.emission, "emission", w)?;
        WithSid::write_opt(&self.ambient, "ambient", w)?;
        WithSid::write_opt(&self.diffuse, "diffuse", w)?;
        WithSid::write_opt(&self.specular, "specular", w)?;
        WithSid::write_opt(&self.shininess, "shininess", w)?;
        WithSid::write_opt(&self.reflective, "reflective", w)?;
        WithSid::write_opt(&self.reflectivity, "reflectivity", w)?;
        WithSid::write_opt(&self.transparent, "transparent", w)?;
        WithSid::write_opt(&self.transparency, "transparency", w)?;
        WithSid::write_opt(&self.index_of_refraction, "index_of_refraction", w)?;
        e.end(w)
    }
}
impl Blinn {
    /// Run the function `f` on all arguments of type [`Texture`] in the parameters to this shader.
    pub fn on_textures<'a, E>(
        &'a self,
        f: &mut impl FnMut(&'a Texture) -> Result<(), E>,
    ) -> Result<(), E> {
        // Only color-valued slots can hold a texture; float-valued slots
        // (shininess, reflectivity, ...) are skipped.
        on_color_as_texture(&self.emission, f)?;
        on_color_as_texture(&self.ambient, f)?;
        on_color_as_texture(&self.diffuse, f)?;
        on_color_as_texture(&self.specular, f)?;
        on_color_as_texture(&self.reflective, f)?;
        on_color_as_texture(&self.transparent, f)
    }
}
/// Produces a constantly shaded surface that is independent of lighting.
#[derive(Clone, Default, Debug)]
pub struct ConstantFx {
    /// Declares the amount of light emitted from the surface of this object.
    pub emission: Option<WithSid<ColorParam>>,
    /// Declares the color of a perfect mirror reflection.
    pub reflective: Option<WithSid<ColorParam>>,
    /// Declares the amount of perfect mirror reflection to be added
    /// to the reflected light as a value between 0.0 and 1.0.
    pub reflectivity: Option<WithSid<FloatParam>>,
    /// Declares the color of perfectly refracted light.
    pub transparent: Option<WithSid<ColorParam>>,
    /// Declares the amount of perfectly refracted light added
    /// to the reflected color as a scalar value between 0.0 and 1.0.
    pub transparency: Option<WithSid<FloatParam>>,
    /// Declares the index of refraction for perfectly refracted light
    /// as a single scalar index.
    pub index_of_refraction: Option<WithSid<FloatParam>>,
}
impl XNode for ConstantFx {
    const NAME: &'static str = "constant";
    /// Parses a `<constant>` element. All children are optional but are
    /// consumed in the schema order matching the field order above.
    fn parse(element: &Element) -> Result<Self> {
        debug_assert_eq!(element.name(), Self::NAME);
        let mut it = element.children().peekable();
        Ok(ConstantFx {
            emission: parse_opt("emission", &mut it, WithSid::parse)?,
            reflective: parse_opt("reflective", &mut it, WithSid::parse)?,
            reflectivity: parse_opt("reflectivity", &mut it, WithSid::parse)?,
            transparent: parse_opt("transparent", &mut it, WithSid::parse)?,
            transparency: parse_opt("transparency", &mut it, WithSid::parse)?,
            index_of_refraction: parse_opt("index_of_refraction", &mut it, WithSid::parse)?,
        })
    }
}
impl XNodeWrite for ConstantFx {
    fn write_to<W: Write>(&self, w: &mut XWriter<W>) -> Result<()> {
        // Emit children in schema order, skipping `None` slots.
        let e = Self::elem().start(w)?;
        WithSid::write_opt(&self.emission, "emission", w)?;
        WithSid::write_opt(&self.reflective, "reflective", w)?;
        WithSid::write_opt(&self.reflectivity, "reflectivity", w)?;
        WithSid::write_opt(&self.transparent, "transparent", w)?;
        WithSid::write_opt(&self.transparency, "transparency", w)?;
        WithSid::write_opt(&self.index_of_refraction, "index_of_refraction", w)?;
        e.end(w)
    }
}
impl ConstantFx {
    /// Run the function `f` on all arguments of type [`Texture`] in the parameters to this shader.
    pub fn on_textures<'a, E>(
        &'a self,
        f: &mut impl FnMut(&'a Texture) -> Result<(), E>,
    ) -> Result<(), E> {
        // Only color-valued slots can hold a texture.
        on_color_as_texture(&self.emission, f)?;
        on_color_as_texture(&self.reflective, f)?;
        on_color_as_texture(&self.transparent, f)
    }
}
/// Produces a diffuse shaded surface that is independent of lighting.
#[derive(Clone, Default, Debug)]
pub struct Lambert {
    /// Declares the amount of light emitted from the surface of this object.
    pub emission: Option<WithSid<ColorParam>>,
    /// Declares the amount of ambient light emitted from the surface of this object.
    pub ambient: Option<WithSid<ColorParam>>,
    /// Declares the amount of light diffusely reflected from the surface of this object.
    pub diffuse: Option<WithSid<ColorParam>>,
    /// Declares the color of a perfect mirror reflection.
    pub reflective: Option<WithSid<ColorParam>>,
    /// Declares the amount of perfect mirror reflection to be added
    /// to the reflected light as a value between 0.0 and 1.0.
    pub reflectivity: Option<WithSid<FloatParam>>,
    /// Declares the color of perfectly refracted light.
    pub transparent: Option<WithSid<ColorParam>>,
    /// Declares the amount of perfectly refracted light added
    /// to the reflected color as a scalar value between 0.0 and 1.0.
    pub transparency: Option<WithSid<FloatParam>>,
    /// Declares the index of refraction for perfectly refracted light
    /// as a single scalar index.
    pub index_of_refraction: Option<WithSid<FloatParam>>,
}
impl XNode for Lambert {
    const NAME: &'static str = "lambert";
    /// Parses a `<lambert>` element. All children are optional but are
    /// consumed in the schema order matching the field order above.
    fn parse(element: &Element) -> Result<Self> {
        debug_assert_eq!(element.name(), Self::NAME);
        let mut it = element.children().peekable();
        Ok(Lambert {
            emission: parse_opt("emission", &mut it, WithSid::parse)?,
            ambient: parse_opt("ambient", &mut it, WithSid::parse)?,
            diffuse: parse_opt("diffuse", &mut it, WithSid::parse)?,
            reflective: parse_opt("reflective", &mut it, WithSid::parse)?,
            reflectivity: parse_opt("reflectivity", &mut it, WithSid::parse)?,
            transparent: parse_opt("transparent", &mut it, WithSid::parse)?,
            transparency: parse_opt("transparency", &mut it, WithSid::parse)?,
            index_of_refraction: parse_opt("index_of_refraction", &mut it, WithSid::parse)?,
        })
    }
}
impl XNodeWrite for Lambert {
    fn write_to<W: Write>(&self, w: &mut XWriter<W>) -> Result<()> {
        // Emit children in schema order, skipping `None` slots.
        let e = Self::elem().start(w)?;
        WithSid::write_opt(&self.emission, "emission", w)?;
        WithSid::write_opt(&self.ambient, "ambient", w)?;
        WithSid::write_opt(&self.diffuse, "diffuse", w)?;
        WithSid::write_opt(&self.reflective, "reflective", w)?;
        WithSid::write_opt(&self.reflectivity, "reflectivity", w)?;
        WithSid::write_opt(&self.transparent, "transparent", w)?;
        WithSid::write_opt(&self.transparency, "transparency", w)?;
        WithSid::write_opt(&self.index_of_refraction, "index_of_refraction", w)?;
        e.end(w)
    }
}
impl Lambert {
    /// Run the function `f` on all arguments of type [`Texture`] in the parameters to this shader.
    pub fn on_textures<'a, E>(
        &'a self,
        f: &mut impl FnMut(&'a Texture) -> Result<(), E>,
    ) -> Result<(), E> {
        // Only color-valued slots can hold a texture.
        on_color_as_texture(&self.emission, f)?;
        on_color_as_texture(&self.ambient, f)?;
        on_color_as_texture(&self.diffuse, f)?;
        on_color_as_texture(&self.reflective, f)?;
        on_color_as_texture(&self.transparent, f)
    }
}
/// Produces a specularly shaded surface where the specular reflection is shaded
/// according the Phong BRDF approximation.
#[derive(Clone, Default, Debug)]
pub struct Phong {
    /// Declares the amount of light emitted from the surface of this object.
    pub emission: Option<WithSid<ColorParam>>,
    /// Declares the amount of ambient light emitted from the surface of this object.
    pub ambient: Option<WithSid<ColorParam>>,
    /// Declares the amount of light diffusely reflected from the surface of this object.
    pub diffuse: Option<WithSid<ColorParam>>,
    /// Declares the color of light specularly reflected from the surface of this object.
    pub specular: Option<WithSid<ColorParam>>,
    /// Declares the specularity or roughness of the specular reflection lobe.
    pub shininess: Option<WithSid<FloatParam>>,
    /// Declares the color of a perfect mirror reflection.
    pub reflective: Option<WithSid<ColorParam>>,
    /// Declares the amount of perfect mirror reflection to be added
    /// to the reflected light as a value between 0.0 and 1.0.
    pub reflectivity: Option<WithSid<FloatParam>>,
    /// Declares the color of perfectly refracted light.
    pub transparent: Option<WithSid<ColorParam>>,
    /// Declares the amount of perfectly refracted light added
    /// to the reflected color as a scalar value between 0.0 and 1.0.
    pub transparency: Option<WithSid<FloatParam>>,
    /// Declares the index of refraction for perfectly refracted light
    /// as a single scalar index.
    pub index_of_refraction: Option<WithSid<FloatParam>>,
}
impl XNode for Phong {
    const NAME: &'static str = "phong";
    /// Parses a `<phong>` element. All children are optional but are
    /// consumed in the schema order matching the field order above.
    fn parse(element: &Element) -> Result<Self> {
        debug_assert_eq!(element.name(), Self::NAME);
        let mut it = element.children().peekable();
        Ok(Phong {
            emission: parse_opt("emission", &mut it, WithSid::parse)?,
            ambient: parse_opt("ambient", &mut it, WithSid::parse)?,
            diffuse: parse_opt("diffuse", &mut it, WithSid::parse)?,
            specular: parse_opt("specular", &mut it, WithSid::parse)?,
            shininess: parse_opt("shininess", &mut it, WithSid::parse)?,
            reflective: parse_opt("reflective", &mut it, WithSid::parse)?,
            reflectivity: parse_opt("reflectivity", &mut it, WithSid::parse)?,
            transparent: parse_opt("transparent", &mut it, WithSid::parse)?,
            transparency: parse_opt("transparency", &mut it, WithSid::parse)?,
            index_of_refraction: parse_opt("index_of_refraction", &mut it, WithSid::parse)?,
        })
    }
}
impl XNodeWrite for Phong {
    fn write_to<W: Write>(&self, w: &mut XWriter<W>) -> Result<()> {
        // Emit children in schema order, skipping `None` slots.
        let e = Self::elem().start(w)?;
        WithSid::write_opt(&self.emission, "emission", w)?;
        WithSid::write_opt(&self.ambient, "ambient", w)?;
        WithSid::write_opt(&self.diffuse, "diffuse", w)?;
        WithSid::write_opt(&self.specular, "specular", w)?;
        WithSid::write_opt(&self.shininess, "shininess", w)?;
        WithSid::write_opt(&self.reflective, "reflective", w)?;
        WithSid::write_opt(&self.reflectivity, "reflectivity", w)?;
        WithSid::write_opt(&self.transparent, "transparent", w)?;
        WithSid::write_opt(&self.transparency, "transparency", w)?;
        WithSid::write_opt(&self.index_of_refraction, "index_of_refraction", w)?;
        e.end(w)
    }
}
impl Phong {
    /// Run the function `f` on all arguments of type [`Texture`] in the parameters to this shader.
    pub fn on_textures<'a, E>(
        &'a self,
        f: &mut impl FnMut(&'a Texture) -> Result<(), E>,
    ) -> Result<(), E> {
        // Only color-valued slots can hold a texture.
        on_color_as_texture(&self.emission, f)?;
        on_color_as_texture(&self.ambient, f)?;
        on_color_as_texture(&self.diffuse, f)?;
        on_color_as_texture(&self.specular, f)?;
        on_color_as_texture(&self.reflective, f)?;
        on_color_as_texture(&self.transparent, f)
    }
}
/// A struct that attaches an optional SID to a shader parameter.
#[derive(Clone, Default, Debug)]
pub struct WithSid<T> {
    // Optional scoped identifier (COLLADA `sid` attribute).
    sid: Option<String>,
    // The wrapped parameter value.
    data: T,
}
// Transparent read access to the wrapped parameter.
impl<T> Deref for WithSid<T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        &self.data
    }
}
pub(crate) use private::CanWithSid;
// Sealed-trait module: only types in this crate can implement `CanWithSid`,
// which is what `WithSid::parse`/`write_to` require of their payload.
pub(crate) mod private {
    use super::*;
    pub trait CanWithSid: XNodeWrite + Sized {
        // Tries to parse `element` as this payload; `Ok(None)` means
        // "not my element name".
        fn parse(element: &Element) -> Result<Option<Self>>;
        // Serializes the payload, attaching `sid` to the emitted element.
        fn write_with_sid<W: Write>(&self, sid: &Option<String>, w: &mut XWriter<W>) -> Result<()>;
    }
}
impl<T> From<T> for WithSid<T> {
    fn from(data: T) -> Self {
        Self::new(data)
    }
}
impl<T> WithSid<T> {
    /// Construct a new `WithSid` with no sid.
    pub fn new(data: T) -> Self {
        Self { sid: None, data }
    }
    /// Construct a new `WithSid` with a sid.
    #[allow(clippy::self_named_constructors)]
    pub fn with_sid(sid: impl Into<String>, data: T) -> Self {
        Self {
            sid: Some(sid.into()),
            data,
        }
    }
}
impl<T: CanWithSid> WithSid<T> {
    /// Parse a [`WithSid<T>`] from an XML element.
    ///
    /// Expects exactly one child element; its `sid` attribute (if any) is
    /// captured alongside the parsed payload.
    pub fn parse(element: &Element) -> Result<Self> {
        let mut it = element.children().peekable();
        parse_one_many(&mut it, |e| {
            Ok(T::parse(e)?.map(|data| Self {
                sid: e.attr("sid").map(Into::into),
                data,
            }))
        })
    }
    // Writes `this` (if present) wrapped in an element called `name`,
    // e.g. `<emission>...</emission>`.
    fn write_opt(this: &Option<Self>, name: &str, w: &mut XWriter<impl Write>) -> Result<()> {
        opt(this, |this| {
            let elem = ElemBuilder::new(name).start(w)?;
            this.write_to(w)?;
            elem.end(w)
        })
    }
}
impl<T: CanWithSid> XNodeWrite for WithSid<T> {
    fn write_to<W: Write>(&self, w: &mut XWriter<W>) -> Result<()> {
        self.data.write_with_sid(&self.sid, w)
    }
}
/// A type that describes color attributes of fixed-function shader elements inside
/// [`ProfileCommon`] effects.
#[derive(Clone, Debug)]
pub enum ColorParam {
    /// The value is a literal color, specified by four floating-point numbers in RGBA order.
    Color(Box<[f32; 4]>),
    /// The value is specified by a reference to a previously defined parameter
    /// in the current scope that can be cast directly to a `float4`.
    Param(Box<str>),
    /// The value is specified by a reference to a previously defined `sampler2D` object.
    Texture(Box<Texture>),
}
impl From<[f32; 4]> for ColorParam {
    fn from(rgba: [f32; 4]) -> Self {
        Self::color(rgba)
    }
}
impl From<[f32; 4]> for WithSid<ColorParam> {
    fn from(rgba: [f32; 4]) -> Self {
        WithSid::new(rgba.into())
    }
}
impl From<Texture> for ColorParam {
    fn from(tex: Texture) -> Self {
        Self::Texture(Box::new(tex))
    }
}
impl From<Texture> for WithSid<ColorParam> {
    fn from(tex: Texture) -> Self {
        WithSid::new(tex.into())
    }
}
impl ColorParam {
    /// Construct a new `ColorParam` from a color.
    pub fn color(rgba: [f32; 4]) -> Self {
        Self::Color(Box::new(rgba))
    }
}
impl CanWithSid for ColorParam {
    // Accepts `<color>`, `<param ref="...">` or `<texture>`; any other
    // element name yields `Ok(None)` so the caller can report the mismatch.
    fn parse(e: &Element) -> Result<Option<Self>> {
        Ok(Some(match e.name() {
            "color" => Self::Color(parse_array_n(e)?),
            Param::NAME => Self::Param(e.attr("ref").ok_or("expected ref attr")?.into()),
            Texture::NAME => Self::Texture(Texture::parse_box(e)?),
            _ => return Ok(None),
        }))
    }
    // Serializes the variant, attaching the optional `sid` attribute to
    // whichever element is emitted.
    fn write_with_sid<W: Write>(&self, sid: &Option<String>, w: &mut XWriter<W>) -> Result<()> {
        match self {
            Self::Color(arr) => {
                let mut e = ElemBuilder::new("color");
                e.opt_attr("sid", sid);
                let e = e.start(w)?;
                print_arr(&**arr, w)?;
                e.end(w)
            }
            Self::Param(ref_) => {
                let mut e = ElemBuilder::new(Param::NAME);
                e.opt_attr("sid", sid);
                e.attr("ref", ref_);
                e.end(w)
            }
            Self::Texture(e) => e.write_to(w),
        }
    }
}
impl XNodeWrite for ColorParam {
    fn write_to<W: Write>(&self, w: &mut XWriter<W>) -> Result<()> {
        // No SID available in this context.
        self.write_with_sid(&None, w)
    }
}
impl ColorParam {
    /// Convert this parameter to a texture reference, if it is one.
    pub fn as_texture(&self) -> Option<&Texture> {
        if let ColorParam::Texture(tex) = self {
            Some(tex)
        } else {
            None
        }
    }
    /// Get the color literal of this parameter, if it is a literal.
    pub fn as_color(&self) -> Option<&[f32; 4]> {
        if let ColorParam::Color(rgba) = self {
            Some(rgba)
        } else {
            None
        }
    }
}
/// A type that describes the scalar attributes of fixed-function shader elements inside
/// [`ProfileCommon`] effects.
#[derive(Clone, Debug)]
pub enum FloatParam {
    /// The value is represented by a literal floating-point scalar.
    Float(f32),
    /// The value is represented by a reference to a previously
    /// defined parameter that can be directly cast to a floating-point scalar.
    Param(Box<str>),
}
impl From<f32> for FloatParam {
    fn from(val: f32) -> Self {
        Self::Float(val)
    }
}
// Convenience: lift a bare float directly into a sid-less parameter slot.
impl From<f32> for WithSid<FloatParam> {
    fn from(val: f32) -> Self {
        WithSid::new(val.into())
    }
}
impl CanWithSid for FloatParam {
    /// Parse a `<float>` or `<param>` element into the matching variant.
    /// Returns `Ok(None)` for any other element name so the caller can try
    /// further alternatives.
    fn parse(e: &Element) -> Result<Option<Self>> {
        Ok(Some(match e.name() {
            "float" => Self::Float(parse_elem(e)?),
            Param::NAME => Self::Param(e.attr("ref").ok_or("expected ref attr")?.into()),
            _ => return Ok(None),
        }))
    }
    /// Serialize this parameter, attaching the optional `sid` attribute to the
    /// emitted element.
    fn write_with_sid<W: Write>(&self, sid: &Option<String>, w: &mut XWriter<W>) -> Result<()> {
        match self {
            Self::Float(val) => {
                let mut e = ElemBuilder::new("float");
                e.opt_attr("sid", sid);
                let e = e.start(w)?;
                print_elem(val, w)?;
                e.end(w)
            }
            Self::Param(ref_) => {
                let mut e = ElemBuilder::new(Param::NAME);
                e.opt_attr("sid", sid);
                e.attr("ref", ref_);
                e.end(w)
            }
        }
    }
}
impl XNodeWrite for FloatParam {
    /// Serialize without a `sid` attribute (delegates to `write_with_sid`).
    fn write_to<W: Write>(&self, w: &mut XWriter<W>) -> Result<()> {
        self.write_with_sid(&None, w)
    }
}
/// A color parameter referencing a texture.
#[derive(Clone, Debug)]
pub struct Texture {
    /// The texture to reference.
    pub texture: String,
    /// A semantic token, which will be referenced within
    /// [`BindMaterial`] to bind an array of texcoords from a
    /// [`Geometry`] instance to the `TextureUnit`.
    pub texcoord: String,
    /// Provides arbitrary additional information about this element.
    /// Parsed from an optional trailing `<extra>` child; `None` when absent.
    pub extra: Option<Box<Extra>>,
}
impl Texture {
    /// Construct a new `Texture` from the mandatory data.
    /// `extra` starts out `None`.
    pub fn new(texture: impl Into<String>, texcoord: impl Into<String>) -> Self {
        Self {
            texture: texture.into(),
            texcoord: texcoord.into(),
            extra: None,
        }
    }
    /// Serialize as a `<texture>` element with an optional `sid` attribute and
    /// the mandatory `texture`/`texcoord` attributes. The element only gets a
    /// body when an `<extra>` child is present; otherwise it is self-closing.
    fn write_with_sid<W: Write>(&self, sid: &Option<String>, w: &mut XWriter<W>) -> Result<()> {
        let mut e = Self::elem();
        e.opt_attr("sid", sid);
        e.attr("texture", &self.texture);
        e.attr("texcoord", &self.texcoord);
        if let Some(extra) = &self.extra {
            let e = e.start(w)?;
            extra.write_to(w)?;
            e.end(w)
        } else {
            e.end(w)
        }
    }
}
impl XNode for Texture {
    const NAME: &'static str = "texture";
    /// Parse the mandatory `texture`/`texcoord` attributes plus an optional
    /// `<extra>` child; the remaining child iterator is handed to `finish`
    /// together with the result.
    fn parse(e: &Element) -> Result<Self> {
        let mut it = e.children().peekable();
        let res = Texture {
            texture: e.attr("texture").ok_or("expected texture attr")?.into(),
            texcoord: e.attr("texcoord").ok_or("expected texcoord attr")?.into(),
            extra: Extra::parse_opt_box(&mut it)?,
        };
        finish(res, it)
    }
}
impl XNodeWrite for Texture {
    /// Serialize without a `sid` attribute (delegates to `write_with_sid`).
    fn write_to<W: Write>(&self, w: &mut XWriter<W>) -> Result<()> {
        self.write_with_sid(&None, w)
    }
}
/// Invoke `f` on the texture inside `opt` if it holds a
/// [`ColorParam::Texture`]; any other variant (or `None`) is a no-op.
/// Errors from `f` are propagated.
fn on_color_as_texture<'a, E>(
    opt: &'a Option<WithSid<ColorParam>>,
    f: &mut impl FnMut(&'a Texture) -> Result<(), E>,
) -> Result<(), E> {
    if let Some(WithSid {
        data: ColorParam::Texture(tex),
        ..
    }) = opt
    {
        f(tex)?
    }
    Ok(())
}
| 36.423024 | 99 | 0.606413 |
38581043b5e4dcea54375d6173ed9c1c7afe3415 | 631 | use crate::ids::{SpellSpecId};
use std::collections::{HashMap};
/// Static definition of a spell, looked up by `SpellSpecId`
/// in `SPELL_SPECIFICATIONS`.
pub struct SpellSpec {
    // Display name of the spell.
    pub name: &'static str,
    // Human-readable description.
    pub description: &'static str,
    // Damage attribute.
    pub damage: i32,
    // Speed attribute (units defined by the consumer of this spec).
    pub speed: f32,
    // Key naming the behaviour implementation that drives this spell.
    pub behaviour_name: &'static str,
}
lazy_static! {
    // Global table of every spell spec, keyed by `SpellSpecId`.
    // Ids are 1-based: the entry at enumerate() index `i` receives id `i + 1`.
    pub static ref SPELL_SPECIFICATIONS: HashMap<SpellSpecId, SpellSpec> = vec![SpellSpec {
        name: "Fire ball",
        description: "A fire ball that cause burns when explode",
        damage: 5,
        speed: 15.0,
        behaviour_name: "Explotable ball",
    }]
    .into_iter()
    .enumerate()
    .map(|(index, def)| (SpellSpecId(index + 1), def))
    .collect();
}
| 24.269231 | 91 | 0.618067 |
08cce7602e025a775a3e3034df3d3d7fe5860c59 | 3,802 | use seed::prelude::*;
use crate::{
messages::{
authentication::AuthMsg,
cards::{AddCardSuccessPayload, CardsMsg, GetCardsPayload},
decks::{DecksMsg, GetDecksPayload},
routing::RoutingMsg,
session::SessionMsg,
sets::{AddSetSuccessPayload, GetSetsPayload, SetsMsg},
Msg,
},
state::{routing::Route, Model},
};
/// Routing side-effect handler: inspects each application [`Msg`], pushes
/// browser-history entries, and emits the follow-up messages a route change
/// requires (data fetches, modal state, history navigation).
pub fn operate(msg: &Msg, model: &Model, orders: &mut impl Orders<Msg>) {
    match msg {
        // Explicit navigation request: update the browser URL, then reuse the
        // common `Navigate` handling below.
        Msg::Routing(RoutingMsg::Push(r)) => {
            Url::from(r).go_and_push();
            orders.send_msg(Msg::Routing(RoutingMsg::Navigate(r.clone())));
        }
        // After login, land on the user's deck list.
        Msg::Authentication(AuthMsg::LoginSuccess(payload)) => {
            orders.send_msg(Msg::Routing(RoutingMsg::Push(Route::Decks(
                payload.username.clone(),
            ))));
        }
        // After registration, send the user to the login page.
        Msg::Authentication(AuthMsg::RegistrationSuccess) => {
            orders.send_msg(Msg::Routing(RoutingMsg::Push(Route::Login)));
        }
        // Deck added: re-enter the deck list (requires a known username).
        Msg::Decks(DecksMsg::AddDeckSuccess(_)) => {
            if let Some(username) = &model.authentication.username {
                orders.send_msg(Msg::Routing(RoutingMsg::Push(Route::Decks(
                    username.to_owned(),
                ))));
            }
        }
        // Deck deleted: same refresh as above.
        Msg::Decks(DecksMsg::DeleteDeckSuccess(_)) => {
            if let Some(username) = &model.authentication.username {
                orders.send_msg(Msg::Routing(RoutingMsg::Push(Route::Decks(
                    username.to_owned(),
                ))));
            }
        }
        // Set added: go back to the deck's set list.
        Msg::Sets(SetsMsg::AddSetSuccess(AddSetSuccessPayload { deck_id })) => {
            if let Some(username) = &model.authentication.username {
                orders.send_msg(Msg::Routing(RoutingMsg::Push(Route::DeckSets(
                    username.to_owned(),
                    *deck_id,
                ))));
            }
        }
        // Card added: go back to the deck's card list.
        Msg::Cards(CardsMsg::AddCardSuccess(AddCardSuccessPayload { deck_id })) => {
            if let Some(username) = &model.authentication.username {
                orders.send_msg(Msg::Routing(RoutingMsg::Push(Route::DeckCards(
                    username.to_owned(),
                    *deck_id,
                ))));
            }
        }
        // Card deleted: use browser history to return to the previous page.
        Msg::Cards(CardsMsg::DeleteCardSuccess(_)) => {
            let _ = seed::history().back();
        }
        // Card link edited: show the card's detail page.
        Msg::Cards(CardsMsg::EditCardLinkSuccess(x)) => {
            orders.send_msg(Msg::Routing(RoutingMsg::Push(Route::CardDetails(x.card_id))));
        }
        // Set deleted: use browser history to return to the previous page.
        Msg::Sets(SetsMsg::DeleteSetSuccess(_)) => {
            let _ = seed::history().back();
        }
        // Starting a study session routes to the study view.
        Msg::Session(SessionMsg::Study(_)) => {
            orders.send_msg(Msg::Routing(RoutingMsg::Push(Route::Study)));
        }
        // Common navigation handling: close any open modal, then trigger the
        // data fetch the target route needs.
        Msg::Routing(RoutingMsg::Navigate(x)) => {
            orders.send_msg(Msg::Routing(RoutingMsg::ModalOpen(false)));
            match x {
                Route::Decks(username) => {
                    orders.send_msg(Msg::Decks(DecksMsg::GetDecks(GetDecksPayload {
                        username: username.clone(),
                    })));
                }
                Route::DeckCards(_, deck_id) => {
                    orders.send_msg(Msg::Cards(CardsMsg::GetCards(GetCardsPayload {
                        deck_id: *deck_id,
                    })));
                }
                Route::DeckSets(_, deck_id) => {
                    orders.send_msg(Msg::Sets(SetsMsg::GetSets(GetSetsPayload {
                        deck_id: *deck_id,
                    })));
                }
                Route::AddDeck(_) => {
                    orders.send_msg(Msg::Decks(DecksMsg::GetLanguages));
                },
                _ => {}
            }
        }
        _ => {}
    }
}
| 32.775862 | 91 | 0.498159 |
5643eb561ce67011d6ce869887d633a1eb6cd595 | 13,611 | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::{format_err, Context, Error},
fidl::endpoints::create_endpoints,
fidl_fuchsia_net_stack::StackMarker,
fidl_fuchsia_netemul_sync::{BusMarker, BusProxy, Event, SyncManagerMarker},
fidl_fuchsia_netstack::{NetstackMarker, RouteTableEntry2, RouteTableTransactionMarker},
fuchsia_async as fasync,
fuchsia_component::client,
fuchsia_component::client::connect_to_service,
fuchsia_syslog::{fx_log_err, fx_log_info},
futures::TryStreamExt,
net_declare::fidl_ip,
prettytable::{cell, format, row, Table},
std::convert::TryFrom,
std::io::{Read, Write},
std::net::{SocketAddr, TcpListener, TcpStream},
structopt::StructOpt,
};
const BUS_NAME: &str = "test-bus";
const WEAVE_NODE_NAME: &str = "weave-node";
const FUCHSIA_NODE_NAME: &str = "fuchsia-node";
const WLAN_NODE_NAME: &str = "wlan-node";
const WPAN_NODE_NAME: &str = "wpan-node";
const WLAN_NODE_1_NAME: &str = "wlan-node-1";
const WPAN_SERVER_NODE_NAME: &str = "wpan-server-node";
const HELLO_MSG_REQ: &str = "Hello World from TCP Client!";
const HELLO_MSG_RSP: &str = "Hello World from TCP Server!";
const WEAVE_SERVER_NODE_DONE: i32 = 1;
const WPAN_SERVER_NODE_DONE: i32 = 2;
const ENTRY_METRICS: u32 = 256;
/// Handle to the netemul sync bus used to coordinate this test's nodes.
pub struct BusConnection {
    bus: BusProxy,
}
impl BusConnection {
    /// Subscribe `client` to the shared test bus (`BUS_NAME`) through the
    /// `SyncManager` service.
    pub fn new(client: &str) -> Result<BusConnection, Error> {
        let busm = client::connect_to_service::<SyncManagerMarker>()
            .context("SyncManager not available")?;
        let (bus, busch) = fidl::endpoints::create_proxy::<BusMarker>()?;
        busm.bus_subscribe(BUS_NAME, client, busch)?;
        Ok(BusConnection { bus })
    }
    /// Block until the client named `expect` has joined the bus.
    pub async fn wait_for_client(&mut self, expect: &'static str) -> Result<(), Error> {
        let _ = self.bus.wait_for_clients(&mut vec![expect].drain(..), 0).await?;
        Ok(())
    }
    /// Broadcast an event carrying only `code` to all bus subscribers.
    pub fn publish_code(&self, code: i32) -> Result<(), Error> {
        self.bus.publish(Event {
            code: Some(code),
            message: None,
            arguments: None,
            ..Event::EMPTY
        })?;
        Ok(())
    }
    /// Wait until every code in `code_vec` has been observed on the bus
    /// (in any order). A code outside `code_vec` is treated as an error;
    /// non-data bus events are ignored.
    pub async fn wait_for_event(&self, mut code_vec: Vec<i32>) -> Result<(), Error> {
        let mut stream = self.bus.take_event_stream();
        loop {
            match stream.try_next().await? {
                Some(fidl_fuchsia_netemul_sync::BusEvent::OnBusData { data }) => match data.code {
                    Some(rcv_code) => {
                        if code_vec.contains(&rcv_code) {
                            // Mark this code as seen by removing it.
                            code_vec.retain(|&x| x != rcv_code);
                        } else {
                            fx_log_err!("unexpected rcv_code: {:?}", rcv_code);
                            return Err(format_err!("unexpected rcv_code in wait_for_event"));
                        }
                    }
                    None => {
                        return Err(format_err!("data.code contains no event"));
                    }
                },
                _ => {}
            };
            if code_vec.is_empty() {
                break;
            }
        }
        Ok(())
    }
}
/// Look up the numeric id of the interface whose name equals `name`.
///
/// Returns an error naming the missing interface when no match exists.
/// Takes `&[InterfaceInfo]` rather than `&Vec<…>`; existing `&Vec` callers
/// still work through deref coercion.
fn get_interface_id(
    name: &str,
    intf: &[fidl_fuchsia_net_stack::InterfaceInfo],
) -> Result<u64, Error> {
    intf.iter()
        .find(|interface| interface.properties.name == name)
        .map(|interface| interface.id)
        // `ok_or_else` so the error message is only formatted on failure.
        .ok_or_else(|| anyhow::format_err!("failed to find {}", name))
}
/// Add a single static route (`dest`/`netmask` via interface `nicid`, no
/// gateway, metric `ENTRY_METRICS`) through an open route-table transaction.
/// Fails if the FIDL call errors or returns a non-zero zx status.
async fn add_route_table_entry(
    dest: fidl_fuchsia_net::IpAddress,
    netmask: fidl_fuchsia_net::IpAddress,
    nicid: u64,
    route_proxy: &fidl_fuchsia_netstack::RouteTableTransactionProxy,
) -> Result<(), Error> {
    let mut entry = RouteTableEntry2 {
        destination: dest,
        netmask: netmask,
        gateway: None,
        nicid: u32::try_from(nicid)?,
        metric: ENTRY_METRICS,
    };
    let zx_status = route_proxy
        .add_route(&mut entry)
        .await
        .with_context(|| format!("error in route_proxy.add_route {:?}", entry))?;
    if zx_status != 0 {
        return Err(format_err!("error in route_proxy.add_route, zx_status {}", zx_status));
    }
    Ok(())
}
/// Router role: install static routes for the weave/wpan/wlan endpoints,
/// enable IP forwarding, then wait on the sync bus until both server nodes
/// report completion.
async fn run_fuchsia_node() -> Result<(), Error> {
    let stack =
        client::connect_to_service::<StackMarker>().context("failed to connect to netstack")?;
    let netstack =
        connect_to_service::<NetstackMarker>().context("failed to connect to netstack")?;
    // Resolve the three endpoint interfaces by name.
    let intf = stack.list_interfaces().await.context("getting interfaces")?;
    let wlan_if_id = get_interface_id("wlan-f-ep", &intf)?;
    let wpan_if_id = get_interface_id("wpan-f-ep", &intf)?;
    let weave_if_id = get_interface_id("weave-f-ep", &intf)?;
    fx_log_info!("wlan intf: {:?}", wlan_if_id);
    fx_log_info!("wpan intf: {:?}", wpan_if_id);
    fx_log_info!("weave intf: {:?}", weave_if_id);
    // All route additions happen inside one route-table transaction.
    let (client_end, server_end) =
        create_endpoints::<RouteTableTransactionMarker>().context("error creating endpoint")?;
    let zx_status = netstack
        .start_route_table_transaction(server_end)
        .await
        .context("error start_route_table_transaction")?;
    if zx_status != 0 {
        return Err(format_err!(
            "error in netstack.start_route_table_transaction, zx_status {}",
            zx_status
        ));
    }
    let route_proxy = client_end.into_proxy().context("error route_proxy.into_proxy")?;
    // routing rules for weave tun
    let () = add_route_table_entry(
        fidl_ip!(fdce:da10:7616:6:6616:6600:4734:b051),
        fidl_ip!(ffff: ffff: ffff: ffff: ffff: ffff: ffff: ffff),
        weave_if_id,
        &route_proxy,
    )
    .await
    .context("adding routing table entry for weave tun")?;
    let () = add_route_table_entry(
        fidl_ip!(fdce:da10:7616::),
        fidl_ip!(ffff:ffff:ffff::),
        weave_if_id,
        &route_proxy,
    )
    .await
    .context("adding routing table entry for weave tun")?;
    // routing rules for wpan
    let () = add_route_table_entry(
        fidl_ip!(fdce:da10:7616:6::),
        fidl_ip!(ffff:ffff:ffff:ffff::),
        wpan_if_id,
        &route_proxy,
    )
    .await
    .context("adding routing table entry for wpan")?;
    let () = add_route_table_entry(
        fidl_ip!(fdd3:b786:54dc::),
        fidl_ip!(ffff:ffff:ffff:ffff::),
        wpan_if_id,
        &route_proxy,
    )
    .await
    .context("adding routing table entry for wpan")?;
    // routing rules for wlan
    let () = add_route_table_entry(
        fidl_ip!(fdce:da10:7616:1::),
        fidl_ip!(ffff:ffff:ffff:ffff::),
        wlan_if_id,
        &route_proxy,
    )
    .await
    .context("adding routing table entry for wlan")?;
    fx_log_info!("successfully added entries to route table");
    // Dump the resulting route table for debugging.
    let route_table =
        netstack.get_route_table2().await.context("error retrieving routing table")?;
    let mut t = Table::new();
    t.set_format(format::FormatBuilder::new().padding(2, 2).build());
    t.set_titles(row!["Destination", "Netmask", "Gateway", "NICID", "Metric"]);
    for entry in route_table {
        let route = fidl_fuchsia_netstack_ext::RouteTableEntry2::from(entry);
        let gateway_str = match route.gateway {
            None => "-".to_string(),
            Some(g) => format!("{}", g),
        };
        t.add_row(row![route.destination, route.netmask, gateway_str, route.nicid, route.metric]);
    }
    fx_log_info!("{}", t.printstd());
    let () = stack.enable_ip_forwarding().await.context("failed to enable ip forwarding")?;
    // Stay alive until both servers signal completion on the bus.
    let bus = BusConnection::new(FUCHSIA_NODE_NAME)?;
    fx_log_info!("waiting for server to finish...");
    let () = bus.wait_for_event(vec![WEAVE_SERVER_NODE_DONE, WPAN_SERVER_NODE_DONE]).await?;
    fx_log_info!("fuchsia node exited");
    Ok(())
}
/// Serve one TCP client: read a single request, require it to equal
/// `HELLO_MSG_REQ`, then reply with `HELLO_MSG_RSP`.
async fn handle_request(mut stream: TcpStream, remote: SocketAddr) -> Result<(), Error> {
    fx_log_info!("accepted connection from {}", remote);
    let mut buffer = [0; 512];
    let rd = stream.read(&mut buffer).context("read failed")?;
    let req = String::from_utf8(buffer[0..rd].to_vec()).context("not a valid utf8")?;
    if req != HELLO_MSG_REQ {
        return Err(format_err!("Got unexpected request from client: {}", req));
    }
    fx_log_info!("Got request {}", req);
    // `write_all` retries on partial writes; a bare `write` is allowed to send
    // only part of the message, which the old length check could only detect,
    // not recover from.
    stream.write_all(HELLO_MSG_RSP.as_bytes()).context("write failed")?;
    stream.flush().context("flush failed")
}
/// Server role: bind one listener per address, accept `conn_nums[i]`
/// connections on listener `i` (handling each via `handle_request`), then
/// publish `node_code` on the sync bus to signal completion.
async fn run_server_node(
    listen_addrs: Vec<String>,
    conn_nums: Vec<u32>,
    node_name: &str,
    node_code: i32,
) -> Result<(), Error> {
    // Bind everything up front so clients can connect before we accept.
    let mut listener_vec = Vec::with_capacity(listen_addrs.len());
    for listen_addr in listen_addrs {
        listener_vec.push(TcpListener::bind(listen_addr).context("Can't bind to address")?);
    }
    fx_log_info!("server {} for connections...", node_name);
    let bus = BusConnection::new(node_name)?;
    // Pair each listener with its expected connection count instead of
    // indexing both vectors (which would panic if `conn_nums` were short).
    for (listener, &conn_num) in listener_vec.iter().zip(conn_nums.iter()) {
        let mut handler_futs = Vec::new();
        for _ in 0..conn_num {
            // Propagate accept failures instead of panicking (was `.unwrap()`).
            let (stream, remote) = listener.accept().context("accept failed")?;
            handler_futs.push(handle_request(stream, remote));
        }
        for handler_fut in handler_futs {
            let () = handler_fut.await?;
        }
    }
    let () = bus.publish_code(node_code)?;
    fx_log_info!("server {} exited successfully", node_name);
    Ok(())
}
/// Client request/response round-trip: connect to `connect_addr`, send
/// `HELLO_MSG_REQ`, and verify the reply equals `HELLO_MSG_RSP`.
async fn get_test_fut_client(connect_addr: String) -> Result<(), Error> {
    let mut stream = TcpStream::connect(connect_addr.clone()).context("Tcp connection failed")?;
    // `write_all` guarantees the whole request is sent; the previous bare
    // `write` ignored the returned byte count and could send a prefix only.
    stream.write_all(HELLO_MSG_REQ.as_bytes())?;
    stream.flush()?;
    let mut buffer = [0; 512];
    let rd = stream.read(&mut buffer)?;
    let rsp = String::from_utf8(buffer[0..rd].to_vec()).context("not a valid utf8")?;
    fx_log_info!("got response {} from {}", rsp, connect_addr);
    if rsp != HELLO_MSG_RSP {
        return Err(format_err!("Got unexpected echo from server: {}", rsp));
    }
    Ok(())
}
/// Client role: wait on the sync bus for the fuchsia (router) node and every
/// named server node to come up, then perform one request/response round-trip
/// against each address in `connect_addrs`.
async fn run_client_node(
    connect_addrs: Vec<String>,
    node_name: &str,
    server_node_names: Vec<&'static str>,
) -> Result<(), Error> {
    let mut bus = BusConnection::new(node_name)?;
    fx_log_info!("client {} is up and for fuchsia node to start", node_name);
    let () = bus.wait_for_client(FUCHSIA_NODE_NAME).await?;
    for server_node_name in server_node_names {
        fx_log_info!("waiting for server node {} to start...", server_node_name);
        let () = bus.wait_for_client(server_node_name).await?;
    }
    // Kick off all round-trips, then await them in order.
    let mut fut_vec = Vec::new();
    for connect_addr in connect_addrs {
        fx_log_info!("connecting to {}...", connect_addr);
        fut_vec.push(get_test_fut_client(connect_addr));
        fx_log_info!("succeed");
    }
    for fut in fut_vec {
        fut.await?;
    }
    fx_log_info!("client {} exited", node_name);
    Ok(())
}
/// Command-line role selector: each netemul environment launches this binary
/// with one of these subcommands plus its listen/connect addresses.
#[derive(StructOpt, Debug)]
enum Opt {
    // Weave server: two listen addresses, two connections each.
    #[structopt(name = "weave-node")]
    WeaveNode { listen_addr_0: String, listen_addr_1: String },
    // Router node: installs routes and waits for both servers to finish.
    #[structopt(name = "fuchsia-node")]
    FuchsiaNode,
    // Wpan: client against two addresses, then a one-connection server.
    #[structopt(name = "wpan-node")]
    WpanNode { connect_addr_0: String, connect_addr_1: String, listen_addr_0: String },
    // Wlan: client against three addresses (in two phases).
    #[structopt(name = "wlan-node")]
    WlanNode { connect_addr_0: String, connect_addr_1: String, connect_addr_2: String },
}
/// Entry point: parse the role subcommand, set up syslog with a role tag,
/// and dispatch to the matching node routine.
#[fasync::run_singlethreaded]
async fn main() -> Result<(), Error> {
    let opt = Opt::from_args();
    // Tag log lines with the role so interleaved logs are attributable.
    let node_name_str = match opt {
        Opt::WeaveNode { .. } => "weave_node",
        Opt::FuchsiaNode => "fuchsia_node",
        Opt::WlanNode { .. } => "wlan_node",
        Opt::WpanNode { .. } => "wpan_node",
    };
    fuchsia_syslog::init_with_tags(&[node_name_str])?;
    match opt {
        Opt::WeaveNode { listen_addr_0, listen_addr_1 } => {
            run_server_node(
                vec![listen_addr_0, listen_addr_1],
                vec![2, 2],
                WEAVE_NODE_NAME,
                WEAVE_SERVER_NODE_DONE,
            )
            .await
            .context("Error running weave-node server")?;
            ()
        }
        Opt::FuchsiaNode => {
            run_fuchsia_node().await.context("Error running fuchsia-node")?;
        }
        // Wlan first talks to the weave server, then to the wpan server
        // under a second bus identity.
        Opt::WlanNode { connect_addr_0, connect_addr_1, connect_addr_2 } => {
            run_client_node(
                vec![connect_addr_0, connect_addr_1],
                WLAN_NODE_NAME,
                vec![WEAVE_NODE_NAME],
            )
            .await
            .context("Error running wlan-node client")?;
            run_client_node(vec![connect_addr_2], WLAN_NODE_1_NAME, vec![WPAN_SERVER_NODE_NAME])
                .await
                .context("Error running wlan-node client 1")?;
        }
        // Wpan acts as client toward weave, then serves one connection itself.
        Opt::WpanNode { connect_addr_0, connect_addr_1, listen_addr_0 } => {
            run_client_node(
                vec![connect_addr_0, connect_addr_1],
                WPAN_NODE_NAME,
                vec![WEAVE_NODE_NAME],
            )
            .await
            .context("Error running wpan-node client")?;
            run_server_node(
                vec![listen_addr_0],
                vec![1],
                WPAN_SERVER_NODE_NAME,
                WPAN_SERVER_NODE_DONE,
            )
            .await
            .context("Error running wpan-node server")?;
        }
    };
    Ok(())
}
| 34.284635 | 98 | 0.602454 |
2867bcfa512a731c50e82ef130657d4ae03bc2c9 | 7,341 | use crate::{
stream::{interval, DEFAULT_POLL_DURATION},
JsonRpcClient, Provider, ProviderError,
};
use ethers_core::types::{TransactionReceipt, TxHash, U64};
use futures_core::stream::Stream;
use futures_util::stream::StreamExt;
use pin_project::pin_project;
use std::{
fmt,
future::Future,
ops::Deref,
pin::Pin,
task::{Context, Poll},
time::Duration,
};
/// A pending transaction is a transaction which has been submitted but is not yet mined.
/// `await`'ing on a pending transaction will resolve to a transaction receipt
/// once the transaction has enough `confirmations`. The default number of confirmations
/// is 1, but may be adjusted with the `confirmations` method. If the transaction does not
/// have enough confirmations or is not mined, the future will stay in the pending state.
#[pin_project]
pub struct PendingTransaction<'a, P> {
    /// Hash of the transaction being awaited.
    tx_hash: TxHash,
    /// Confirmations required before the future resolves (defaults to 1).
    confirmations: usize,
    /// Provider used to poll for the receipt and the current block number.
    provider: &'a Provider<P>,
    /// Current phase of the polling state machine.
    state: PendingTxState<'a>,
    /// Ticker that rate-limits how often the chain is polled.
    interval: Box<dyn Stream<Item = ()> + Send + Unpin>,
}
impl<'a, P: JsonRpcClient> PendingTransaction<'a, P> {
    /// Creates a new pending transaction poller from a hash and a provider.
    /// Starts in the `GettingReceipt` state with 1 required confirmation and
    /// the default polling interval.
    pub fn new(tx_hash: TxHash, provider: &'a Provider<P>) -> Self {
        let fut = Box::pin(provider.get_transaction_receipt(tx_hash));
        Self {
            tx_hash,
            confirmations: 1,
            provider,
            state: PendingTxState::GettingReceipt(fut),
            interval: Box::new(interval(DEFAULT_POLL_DURATION)),
        }
    }
    /// Sets the number of confirmations for the pending transaction to resolve
    /// to a receipt
    pub fn confirmations(mut self, confs: usize) -> Self {
        self.confirmations = confs;
        self
    }
    /// Sets the polling interval, interpreted as milliseconds.
    pub fn interval<T: Into<u64>>(mut self, duration: T) -> Self {
        self.interval = Box::new(interval(Duration::from_millis(duration.into())));
        self
    }
}
/// Polling state machine: GettingReceipt -> CheckingReceipt ->
/// (GettingBlockNumber, when >1 confirmation is required) -> Completed.
impl<'a, P: JsonRpcClient> Future for PendingTransaction<'a, P> {
    type Output = Result<TransactionReceipt, ProviderError>;
    fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll<Self::Output> {
        let this = self.project();
        match this.state {
            PendingTxState::GettingReceipt(fut) => {
                // Wait the polling period so that we do not spam the chain when no
                // new block has been mined
                let _ready = futures_util::ready!(this.interval.poll_next_unpin(ctx));
                // On RPC error, retry with a fresh receipt request.
                if let Ok(receipt) = futures_util::ready!(fut.as_mut().poll(ctx)) {
                    *this.state = PendingTxState::CheckingReceipt(Box::new(receipt))
                } else {
                    let fut = Box::pin(this.provider.get_transaction_receipt(*this.tx_hash));
                    *this.state = PendingTxState::GettingReceipt(fut)
                }
            }
            PendingTxState::CheckingReceipt(receipt) => {
                // If we requested more than 1 confirmation, we need to compare the receipt's
                // block number and the current block
                if *this.confirmations > 1 {
                    let fut = Box::pin(this.provider.get_block_number());
                    *this.state =
                        PendingTxState::GettingBlockNumber(fut, Box::new(*receipt.clone()));
                    // Schedule the waker to poll again
                    ctx.waker().wake_by_ref();
                } else {
                    // Single confirmation: the mined receipt is enough.
                    let receipt = *receipt.clone();
                    *this.state = PendingTxState::Completed;
                    return Poll::Ready(Ok(receipt));
                }
            }
            PendingTxState::GettingBlockNumber(fut, receipt) => {
                // Wait the polling period so that we do not spam the chain when no
                // new block has been mined
                let _ready = futures_util::ready!(this.interval.poll_next_unpin(ctx));
                // Wait for the interval
                let inclusion_block = receipt
                    .block_number
                    .expect("Receipt did not have a block number. This should never happen");
                let current_block = futures_util::ready!(fut.as_mut().poll(ctx))?;
                // if the transaction has at least K confirmations, return the receipt
                // (subtract 1 since the tx already has 1 conf when it's mined)
                if current_block >= inclusion_block + *this.confirmations - 1 {
                    let receipt = *receipt.clone();
                    *this.state = PendingTxState::Completed;
                    return Poll::Ready(Ok(receipt));
                } else {
                    // we need to re-instantiate the get_block_number future so that
                    // we poll again
                    let fut = Box::pin(this.provider.get_block_number());
                    *this.state = PendingTxState::GettingBlockNumber(fut, receipt.clone());
                    return Poll::Pending;
                }
            }
            PendingTxState::Completed => {
                panic!("polled pending transaction future after completion")
            }
        };
        Poll::Pending
    }
}
impl<'a, P> fmt::Debug for PendingTransaction<'a, P> {
    /// Shows the hash, confirmation target, and current state variant;
    /// the provider and interval are omitted.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("PendingTransaction")
            .field("tx_hash", &self.tx_hash)
            .field("confirmations", &self.confirmations)
            .field("state", &self.state)
            .finish()
    }
}
impl<'a, P> PartialEq for PendingTransaction<'a, P> {
    /// Two pending transactions are equal iff they track the same hash;
    /// confirmation count and state are ignored.
    fn eq(&self, other: &Self) -> bool {
        self.tx_hash == other.tx_hash
    }
}
impl<'a, P> PartialEq<TxHash> for PendingTransaction<'a, P> {
    /// Allows comparing a pending transaction directly against a hash.
    fn eq(&self, other: &TxHash) -> bool {
        &self.tx_hash == other
    }
}
// Equality delegates to `TxHash` equality, so `Eq` holds.
impl<'a, P> Eq for PendingTransaction<'a, P> {}
/// Dereferences to the tracked transaction hash.
impl<'a, P> Deref for PendingTransaction<'a, P> {
    type Target = TxHash;
    fn deref(&self) -> &Self::Target {
        &self.tx_hash
    }
}
// Helper type alias for a pinned, boxed provider future.
type PinBoxFut<'a, T> = Pin<Box<dyn Future<Output = Result<T, ProviderError>> + 'a>>;
// We box the TransactionReceipts to keep the enum small.
// Progression (see the `Future` impl): GettingReceipt -> CheckingReceipt ->
// (GettingBlockNumber, only when >1 confirmation is required) -> Completed.
enum PendingTxState<'a> {
    /// Polling the blockchain for the receipt
    GettingReceipt(PinBoxFut<'a, TransactionReceipt>),
    /// Polling the blockchain for the current block number
    GettingBlockNumber(PinBoxFut<'a, U64>, Box<TransactionReceipt>),
    /// If the pending tx required only 1 conf, it will return early. Otherwise it will
    /// proceed to the next state which will poll the block number until there have been
    /// enough confirmations
    CheckingReceipt(Box<TransactionReceipt>),
    /// Future has completed and should panic if polled again
    Completed,
}
impl<'a> fmt::Debug for PendingTxState<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render only the variant name; the boxed futures and receipts held
        // by the variants are not printed.
        f.debug_struct("PendingTxState")
            .field(
                "state",
                &match self {
                    PendingTxState::GettingReceipt(_) => "GettingReceipt",
                    PendingTxState::GettingBlockNumber(_, _) => "GettingBlockNumber",
                    PendingTxState::CheckingReceipt(_) => "CheckingReceipt",
                    PendingTxState::Completed => "Completed",
                },
            )
            .finish()
    }
}
| 37.646154 | 93 | 0.59229 |
e81639d6f36101e3055e569ed1fc46ea0ee80e5a | 649 | use std::fmt::{self, Display};
use std::error;
use nom::Err;
/// Simple string-carrying error type for parse failures.
#[derive(Debug, Clone)]
pub struct Error {
    // Human-readable description rendered by `Display`.
    message: String
}
impl Error {
    /// Build an error from anything printable.
    pub fn new<T: Display>(message: T) -> Error {
        Error {
            message: message.to_string()
        }
    }
}
impl Display for Error {
    /// Writes the stored message verbatim.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        formatter.write_str(&self.message)
    }
}
impl error::Error for Error {
    // NOTE(review): `Error::description` is deprecated in favor of `Display`;
    // note it returns the fixed string "parse error", not `self.message`.
    fn description(&self) -> &str {
        "parse error"
    }
}
/// Convert any nom parse error into this type by capturing its Debug output.
impl<I> From<Err<I>> for Error
    where I : fmt::Debug {
    fn from(err: Err<I>) -> Self {
        Error::new(format!("{:?}", err))
    }
}
e425ff5a59654a685f93367a38026ae9e4ebbc8a | 10,259 | #[macro_use] extern crate log;
extern crate env_logger;
#[macro_use] extern crate serenity;
extern crate kankyo;
extern crate chrono;
extern crate rand;
use serenity::CACHE;
use serenity::framework::StandardFramework;
use serenity::framework::standard::{HelpBehaviour, help_commands};
use serenity::model::event::ResumedEvent;
use serenity::model::gateway::Ready;
use serenity::prelude::*;
use serenity::http;
use rand::distributions::{Distribution, Uniform};
use std::collections::HashSet;
use std::env;
use chrono::prelude::*;
// Discord gateway event handler; only logs connect/resume events.
struct Handler;
impl EventHandler for Handler {
    // Logged once the gateway session is established.
    fn ready(&self, _: Context, ready: Ready) {
        info!("Connected as {}", ready.user.name);
    }
    // Logged when a dropped session is resumed.
    fn resume(&self, _: Context, _: ResumedEvent) {
        info!("Resumed");
    }
}
/// Bot entry point: loads `.env`, reads `DISCORD_TOKEN`, registers the
/// command framework (prefix `=`), and runs the client until it errors.
fn main() {
    kankyo::load().expect("Failed to load .env file");
    env_logger::init();
    let token = env::var("DISCORD_TOKEN")
        .expect("Expected a token in the environment");
    let mut client = Client::new(&token, Handler).expect("Err creating client");
    // The application owner is granted owner-only commands (e.g. `=stats`).
    let owners = match http::get_current_application_info() {
        Ok(info) => {
            let mut set = HashSet::new();
            set.insert(info.owner.id);
            set
        },
        Err(why) => panic!("Couldn't get application info: {:?}", why),
    };
    client.with_framework(StandardFramework::new()
        .configure(|c| c
            .owners(owners)
            .on_mention(true)
            .prefix("="))
        .customised_help(help_commands::with_embeds, |c| {
            c.individual_command_tip("If you want more information about a command pass it as an argument to help.")
                .lacking_permissions(HelpBehaviour::Hide)
                .striked_commands_tip(None)
                .command_not_found_text("Command not found {}")
                .lacking_role(HelpBehaviour::Hide)
                .wrong_channel(HelpBehaviour::Hide)
        })
        .group("Commands", |g| g
            .command("today", |c| c.cmd(today)
                      .desc("todays comic"))
            .command("about", |c| c.cmd(about))
            .command("invite", |c| c.cmd(invite))
            .command("tomorrow", |c| c.cmd(tomorrow))
            .command("yesterday", |c| c.cmd(yesterday))
            .command("date", |c| c.cmd(other_day)
                     .num_args(1)
                     .desc("comic from a specific date")
                     .usage("yyyy-mm-dd"))
            .command("random", |c| c.cmd(random)
                     .desc("random comic"))
            .command("stats", |c| c.cmd(stats)).owners_only(true)
        ));
    if let Err(why) = client.start() {
        error!("Client error: {:?}", why);
    }
}
// =yesterday — post yesterday's comic (UTC "today" minus one day) as an embed.
command!(yesterday(_ctx, msg, _args) {
    let utc: DateTime<Utc> = Utc::now();
    let date: NaiveDate = NaiveDate::from_ymd(utc.year(), utc.month(), utc.day()).pred();
    let _ = match garfield_url(date) {
        Some(url) => msg.channel_id.send_message(|m| m
            .embed(|e| {
                e
                .author(|a| {a.name("Garfield.com").url("https://garfield.com")})
                .title(format!("Garfield: {}-{}-{}", date.year(), date.month(), date.day()))
                .url(format!("https://garfield.com/comic/{}/{}/{}", date.year(), date.month(), date.day()))
                .thumbnail("https://cdn.discordapp.com/attachments/381880193700069377/506066660839653386/favicon.png")
                .image(url.as_str())
                .colour((214,135,23))
            })),
        None => msg.channel_id.say("Invalid date."),
    };
});
// =tomorrow — joke command: "time travels" by posting one of three fixed
// past dates chosen at random (tomorrow's comic doesn't exist yet).
command!(tomorrow(_ctx, msg, _args) {
    let mut rng = rand::thread_rng();
    let time_travel: [&str; 3] = ["2017-08-07", "2015-01-08", "1998-11-14"];
    let range = Uniform::new(0,3);
    let comic_date = range.sample(&mut rng);
    let utc = match NaiveDate::parse_from_str(time_travel[comic_date], "%Y-%m-%d") {
        Ok(day) => day,
        Err(why) => {
            warn!("Error: {}, input: {}", why, comic_date);
            let _ = msg.channel_id.say("Invalid input.");
            return Ok(())
        },
    };
    let _ = match garfield_url(utc) {
        Some(url) => msg.channel_id.send_message(|m| m
            .embed(|e| {
                e
                .author(|a| {a.name("Garfield.com").url("https://garfield.com")})
                .title(format!("Garfield: {}-{}-{}", utc.year(), utc.month(), utc.day()))
                .url(format!("https://garfield.com/comic/{}/{}/{}", utc.year(), utc.month(), utc.day()))
                .thumbnail("https://cdn.discordapp.com/attachments/381880193700069377/506066660839653386/favicon.png")
                .image(url.as_str())
                .colour((214,135,23))
            })),
        None => msg.channel_id.say("Invalid date. (date should be between 1978-06-19 and today.)"),
    };
});
// =invite — post the bot's OAuth2 invite link.
command!(invite(_ctx, msg, _args) {
    if let Err(why) =
        msg.channel_id.say("Invite the bot to your server: <https://discordapp.com/oauth2/authorize?client_id=404364579645292564&scope=bot>
        ") {
        warn!("Error sending message: {:?}", why);
    }
});
// =stats (owner-only, see main): report the guild count in channel and
// log the guild names.
command!(stats(_ctx, msg, _args) {
    // Clone the id list so the cache read-lock is released immediately.
    let guilds = {
        let cache = CACHE.read();
        cache.guilds.clone()
    };
    if let Err(why) = msg.channel_id.say(format!("The bot is in {} servers.", guilds.len())) {
        warn!("Error sending message: {:?}", why);
    }
    let mut svec = Vec::new();
    for g in guilds {
        let pg = g.0.to_partial_guild()?;
        svec.push(pg.name);
    }
    info!("The bot is in the following guilds:\n{:#?}", svec);
});
// =about — one-line bot description.
command!(about(_ctx, msg, _args) {
    if let Err(why) = msg.channel_id.say("This is a garfield comic bot.") {
        warn!("Error sending message: {:?}", why);
    }
});
/// Build the CDN image URL for the comic on `date`.
///
/// Returns `None` outside the valid range: comics exist from the strip's
/// debut (1978-06-19) through today (UTC).
fn garfield_url(date: NaiveDate) -> Option<String> {
    let now: DateTime<Utc> = Utc::now();
    let today = NaiveDate::from_ymd(now.year(), now.month(), now.day());
    let first = NaiveDate::from_ymd(1978, 6, 19);
    if date < first || date > today {
        return None;
    }
    Some(format!(
        "https://d1ejxu6vysztl5.cloudfront.net/comics/garfield/{}/{}-{:02}-{:02}.gif?format=png",
        date.year(),
        date.year(),
        date.month(),
        date.day()
    ))
}
// =today — post today's comic (UTC) as an embed.
command!(today(_ctx, msg, _args) {
    let utc: DateTime<Utc> = Utc::now();
    let date: NaiveDate = NaiveDate::from_ymd(utc.year(), utc.month(), utc.day());
    let _ = match garfield_url(date) {
        Some(url) => msg.channel_id.send_message(|m| m
            .embed(|e| {
                e
                .author(|a| {a.name("Garfield.com").url("https://garfield.com")})
                .title(format!("Garfield: {}-{}-{}", date.year(), date.month(), date.day()))
                .url(format!("https://garfield.com/comic/{}/{}/{}", date.year(), date.month(), date.day()))
                .thumbnail("https://cdn.discordapp.com/attachments/381880193700069377/506066660839653386/favicon.png")
                .image(url.as_str())
                .colour((214,135,23))
            })),
        None => msg.channel_id.say("Invalid date."),
    };
});
// =date yyyy-mm-dd — post the comic for a user-supplied date; replies with
// an error message on unparseable input or out-of-range dates.
command!(other_day(_ctx, msg, args) {
    let date = args.single::<String>().unwrap();
    let utc = match NaiveDate::parse_from_str(&date, "%Y-%m-%d") {
        Ok(day) => day,
        Err(why) => {
            warn!("Error: {}, input: {}", why, date);
            let _ = msg.channel_id.say("Invalid input.");
            return Ok(())
        },
    };
    let _ = match garfield_url(utc) {
        Some(url) => msg.channel_id.send_message(|m| m
            .embed(|e| {
                e
                .author(|a| {a.name("Garfield.com").url("https://garfield.com")})
                .title(format!("Garfield: {}-{}-{}", utc.year(), utc.month(), utc.day()))
                .url(format!("https://garfield.com/comic/{}/{}/{}", utc.year(), utc.month(), utc.day()))
                .thumbnail("https://cdn.discordapp.com/attachments/381880193700069377/506066660839653386/favicon.png")
                .image(url.as_str())
                .colour((214,135,23))
            })),
        None => msg.channel_id.say("Invalid date. (date should be between 1978-06-19 and today.)"),
    };
});
/// Days in the given month (1-12), ignoring leap years (February is always
/// 28 here). Unknown month numbers fall back to 31, matching the original
/// catch-all behavior.
fn get_month_len(month: usize) -> usize {
    match month {
        4 | 6 | 9 | 11 => 30,
        2 => 28,
        _ => 31,
    }
}
// =random — post a uniformly-chosen comic between 1978-06-19 and today.
command!(random(_ctx, msg, _args) {
    let utc: DateTime<Utc> = Utc::now();
    let cyear: usize = utc.year() as usize;
    let cmonth: usize = utc.month() as usize;
    let cday: usize = utc.day() as usize;
    let mut rng = rand::thread_rng();
    // Year in [1978, current year].
    let year: usize = Uniform::new(1978, cyear + 1).sample(&mut rng);
    // The strip started 1978-06-19, so 1978 only offers June..=December;
    // the current year only offers months up to the current month.
    let month: usize = match year {
        1978 => Uniform::new(6, 12 + 1).sample(&mut rng),
        year if year == cyear => Uniform::new(1, cmonth + 1).sample(&mut rng),
        _ => Uniform::new(1, 12 + 1).sample(&mut rng),
    };
    // BUG FIX: only clamp the day to today's day-of-month for the *current*
    // month of the current year. Previously every month of the current year
    // was clamped, silently excluding valid comics.
    let day: usize = match year {
        1978 if month == 6 => Uniform::new(19, 30 + 1).sample(&mut rng),
        year if year == cyear && month == cmonth => Uniform::new(1, cday + 1).sample(&mut rng),
        _ => Uniform::new(1, get_month_len(month) + 1).sample(&mut rng),
    };
    let date: NaiveDate = NaiveDate::from_ymd(year as i32, month as u32, day as u32);
    let _ = match garfield_url(date) {
        Some(url) => msg.channel_id.send_message(|m| m
            .embed(|e| {
                e
                .author(|a| {a.name("Garfield.com").url("https://garfield.com")})
                .title(format!("Garfield: {}-{}-{}", date.year(), date.month(), date.day()))
                .url(format!("https://garfield.com/comic/{}/{}/{}", date.year(), date.month(), date.day()))
                .thumbnail("https://cdn.discordapp.com/attachments/381880193700069377/506066660839653386/favicon.png")
                .image(url.as_str())
                .colour((214,135,23))
            })),
        None => msg.channel_id.say("Invalid date."),
    };
});
| 35.375862 | 139 | 0.537869 |
d52ec430747a97492e9cc9aaf6e0183c1e35a373 | 3,011 | use {
borsh::{BorshDeserialize, BorshSerialize},
solana_program::{
instruction::{
AccountMeta,
Instruction,
},
pubkey::Pubkey,
system_program,
sysvar,
},
crate::{
id,
state::Receiver,
},
};
use crate::state::Document;
/// Instructions understood by the documents program, serialized with Borsh.
///
/// NOTE(review): the builders below mark the receiver PDA and wallet with
/// `AccountMeta::new` (writable) even where this list shows `[]` (read-only)
/// — confirm which is intended and align the docs with the builders.
#[derive(BorshSerialize, BorshDeserialize, Debug)]
pub enum DocumentsInstruction {
    /// Create a new receiver account
    ///
    /// Accounts expected:
    ///
    /// 0. `[signer]` Sender account/Funder account (must be a system account)
    /// 1. `[]` PDA address of the receiver of the document
    /// 2. `[]` Wallet address of the document receiver
    /// 3. `[]` Rent sysvar
    /// 4. `[]` System program
    CreateReceiverAccount,
    /// Create a new document account
    ///
    /// Accounts expected:
    ///
    /// 0. `[signer]` Sender account/Funding account (must be a system account)
    /// 1. `[writable]` PDA address of the receiver of the document
    /// 2. `[writable]` PDA address of the document
    /// 3. `[]` Wallet address of the document receiver
    /// 4. `[]` Rent sysvar
    /// 5. `[]` Clock sysvar
    /// 6. `[]` System program
    SendDocument {
        /// Link of the meta file and checksum
        data: Vec<u8>,
    },
}
/// Builds a `CreateReceiverAccount` instruction.
///
/// Derives the receiver's PDA from `wallet_address` and lists the accounts
/// in the order the on-chain program expects: the (signing) funder, the
/// receiver PDA, the receiver's wallet, the rent sysvar and the system
/// program.
pub fn create_receiver_account(
    funder_address: &Pubkey,
    wallet_address: &Pubkey,
) -> Instruction {
    let receiver_pda = Receiver::find_pda_address(wallet_address, &id());
    let accounts = vec![
        AccountMeta::new(*funder_address, true),
        AccountMeta::new(receiver_pda, false),
        AccountMeta::new(*wallet_address, false),
        AccountMeta::new_readonly(sysvar::rent::id(), false),
        AccountMeta::new_readonly(system_program::id(), false),
    ];
    Instruction::new_with_borsh(id(), &DocumentsInstruction::CreateReceiverAccount, accounts)
}
/// Creates SendDocument instruction
pub fn send_document(
funder_address: &Pubkey,
wallet_address: &Pubkey,
document_index: u32,
document_data: Vec<u8>,
) -> Instruction {
let receiver_pda_address = Receiver::find_pda_address(
&wallet_address,
&id(),
);
let document_pda_address = Document::find_pda_address(
document_index,
&wallet_address,
&id(),
);
Instruction::new_with_borsh(
id(),
&DocumentsInstruction::SendDocument {
data: document_data,
},
vec![
AccountMeta::new(*funder_address, true),
AccountMeta::new(receiver_pda_address, false),
AccountMeta::new(document_pda_address, false),
AccountMeta::new(*wallet_address, false),
AccountMeta::new_readonly(sysvar::rent::id(), false),
AccountMeta::new_readonly(sysvar::clock::id(), false),
AccountMeta::new_readonly(system_program::id(), false),
],
)
} | 28.67619 | 79 | 0.595151 |
7204782a324e301faf1085ac21f9aa98316d803f | 277 | // --force-warn $LINT causes $LINT (which is warn-by-default) to warn
// despite $LINT being allowed in module
// compile-flags: --force-warn dead_code -Zunstable-options
// check-pass
#![allow(dead_code)]
// The lint is allowed above, but the harness passes `--force-warn dead_code`
// (see the compile-flags directive), which overrides the allow, so the
// warning annotation below must still fire.
fn dead_function() {}
//~^ WARN function is never used
fn main() {}
| 23.083333 | 69 | 0.696751 |
69198cf68285a78af0e714fe30bd52fe23f2147b | 317 | use std::borrow::Cow;
/// Convenience alias for results of template parsing.
pub type TemplateParseResult<T> = Result<T, TemplateParseError>;
/// Errors produced while parsing a template.
#[derive(Debug)]
pub enum TemplateParseError {
    // Syntax error with a human-readable message; `Cow<'static, str>` lets
    // callers pass a static message without allocating.
    Syntax(Cow<'static, str>),
}
//quick_error! {
// #[derive(Debug)]
// pub enum ParseError {
// Syntax(err: String) {
// from()
// },
// }
//}
| 16.684211 | 64 | 0.564669 |
031a96511f5c39d82f8d3eb410fd6cc719368e07 | 10,847 | #![doc = "generated by AutoRust"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
// NOTE(review): this file is machine-generated by AutoRust (see the
// `#![doc]` attribute at the top of the file) — prefer regenerating over
// hand edits; all structs below follow this same derive/new() pattern.
/// Response of a resource-name availability check: the checked name/type
/// and whether the name is `Allowed` or `Reserved`. All fields are
/// optional and omitted from JSON when absent.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct CheckResourceNameResult {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<check_resource_name_result::Status>,
}
impl CheckResourceNameResult {
    pub fn new() -> Self {
        Self::default()
    }
}
pub mod check_resource_name_result {
    use super::*;
    /// Availability verdict for the checked resource name.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Allowed,
        Reserved,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ErrorDefinition {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
}
impl ErrorDefinition {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ErrorResponse {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<ErrorDefinition>,
}
impl ErrorResponse {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Location {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")]
pub subscription_id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
pub display_name: Option<String>,
#[serde(rename = "regionalDisplayName", default, skip_serializing_if = "Option::is_none")]
pub regional_display_name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub metadata: Option<LocationMetadata>,
}
impl Location {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct LocationListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Location>,
}
impl LocationListResult {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct LocationMetadata {
#[serde(rename = "regionType", default, skip_serializing_if = "Option::is_none")]
pub region_type: Option<location_metadata::RegionType>,
#[serde(rename = "regionCategory", default, skip_serializing_if = "Option::is_none")]
pub region_category: Option<location_metadata::RegionCategory>,
#[serde(rename = "geographyGroup", default, skip_serializing_if = "Option::is_none")]
pub geography_group: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub longitude: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub latitude: Option<String>,
#[serde(rename = "physicalLocation", default, skip_serializing_if = "Option::is_none")]
pub physical_location: Option<String>,
#[serde(rename = "pairedRegion", default, skip_serializing_if = "Vec::is_empty")]
pub paired_region: Vec<PairedRegion>,
}
impl LocationMetadata {
pub fn new() -> Self {
Self::default()
}
}
pub mod location_metadata {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum RegionType {
Physical,
Logical,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum RegionCategory {
Recommended,
Other,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ManagedByTenant {
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
}
impl ManagedByTenant {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Operation {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub display: Option<operation::Display>,
}
impl Operation {
pub fn new() -> Self {
Self::default()
}
}
pub mod operation {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Display {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub provider: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resource: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub operation: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
}
impl Display {
pub fn new() -> Self {
Self::default()
}
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct OperationListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Operation>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
impl OperationListResult {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct PairedRegion {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")]
pub subscription_id: Option<String>,
}
impl PairedRegion {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceName {
pub name: String,
#[serde(rename = "type")]
pub type_: String,
}
impl ResourceName {
pub fn new(name: String, type_: String) -> Self {
Self { name, type_ }
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Subscription {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")]
pub subscription_id: Option<String>,
#[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
pub display_name: Option<String>,
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub state: Option<subscription::State>,
#[serde(rename = "subscriptionPolicies", default, skip_serializing_if = "Option::is_none")]
pub subscription_policies: Option<SubscriptionPolicies>,
#[serde(rename = "authorizationSource", default, skip_serializing_if = "Option::is_none")]
pub authorization_source: Option<String>,
#[serde(rename = "managedByTenants", default, skip_serializing_if = "Vec::is_empty")]
pub managed_by_tenants: Vec<ManagedByTenant>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
}
impl Subscription {
pub fn new() -> Self {
Self::default()
}
}
pub mod subscription {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum State {
Enabled,
Warned,
PastDue,
Disabled,
Deleted,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Subscription>,
#[serde(rename = "nextLink")]
pub next_link: String,
}
impl SubscriptionListResult {
pub fn new(next_link: String) -> Self {
Self {
value: Vec::new(),
next_link,
}
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct SubscriptionPolicies {
#[serde(rename = "locationPlacementId", default, skip_serializing_if = "Option::is_none")]
pub location_placement_id: Option<String>,
#[serde(rename = "quotaId", default, skip_serializing_if = "Option::is_none")]
pub quota_id: Option<String>,
#[serde(rename = "spendingLimit", default, skip_serializing_if = "Option::is_none")]
pub spending_limit: Option<subscription_policies::SpendingLimit>,
}
impl SubscriptionPolicies {
pub fn new() -> Self {
Self::default()
}
}
pub mod subscription_policies {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum SpendingLimit {
On,
Off,
CurrentPeriodOff,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct TenantIdDescription {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[serde(rename = "tenantCategory", default, skip_serializing_if = "Option::is_none")]
pub tenant_category: Option<tenant_id_description::TenantCategory>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub country: Option<String>,
#[serde(rename = "countryCode", default, skip_serializing_if = "Option::is_none")]
pub country_code: Option<String>,
#[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
pub display_name: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub domains: Vec<String>,
}
impl TenantIdDescription {
pub fn new() -> Self {
Self::default()
}
}
pub mod tenant_id_description {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum TenantCategory {
Home,
ProjectedBy,
ManagedBy,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TenantListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<TenantIdDescription>,
#[serde(rename = "nextLink")]
pub next_link: String,
}
impl TenantListResult {
pub fn new(next_link: String) -> Self {
Self {
value: Vec::new(),
next_link,
}
}
}
| 34.654952 | 95 | 0.668756 |
28cf629f590ab2e8d4f93d9040a73473dbf004c8 | 25,061 | #![allow(clippy::len_without_is_empty)]
use std::fmt::Debug;
use std::marker::PhantomData;
use std::slice::Iter;
use anyhow::{ensure, Result};
use bellperson::bls::Fr;
use filecoin_hashers::{Hasher, PoseidonArity};
use generic_array::typenum::{Unsigned, U0};
use merkletree::hash::Algorithm;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use crate::drgraph::graph_height;
/// Trait to abstract over the concept of Merkle Proof.
pub trait MerkleProofTrait: Clone + Serialize + DeserializeOwned + Debug + Sync + Send {
    type Hasher: Hasher;
    type Arity: 'static + PoseidonArity;
    type SubTreeArity: 'static + PoseidonArity;
    type TopTreeArity: 'static + PoseidonArity;
    /// Try to convert a merkletree proof into this structure.
    fn try_from_proof(
        p: merkletree::proof::Proof<<Self::Hasher as Hasher>::Domain, Self::Arity>,
    ) -> Result<Self>;
    /// Path in circuit form: per level, the sibling hashes as `Some(Fr)`
    /// plus the insertion index as `Some(usize)`.
    fn as_options(&self) -> Vec<(Vec<Option<Fr>>, Option<usize>)> {
        self.path()
            .iter()
            .map(|v| {
                (
                    v.0.iter().copied().map(Into::into).map(Some).collect(),
                    Some(v.1),
                )
            })
            .collect::<Vec<_>>()
    }
    /// Consumes the proof, returning the leaf and the path in the same
    /// `Option`-wrapped circuit form as [`MerkleProofTrait::as_options`].
    fn into_options_with_leaf(self) -> (Option<Fr>, Vec<(Vec<Option<Fr>>, Option<usize>)>) {
        let leaf = self.leaf();
        let path = self.path();
        (
            Some(leaf.into()),
            path.into_iter()
                .map(|(a, b)| {
                    (
                        a.iter().copied().map(Into::into).map(Some).collect(),
                        Some(b),
                    )
                })
                .collect::<Vec<_>>(),
        )
    }
    /// Path with hashes converted to field elements, without `Option` wrapping.
    fn as_pairs(&self) -> Vec<(Vec<Fr>, usize)> {
        self.path()
            .iter()
            .map(|v| (v.0.iter().copied().map(Into::into).collect(), v.1))
            .collect::<Vec<_>>()
    }
    /// Returns true when the proof is internally consistent, i.e. the root
    /// recomputed from leaf and path equals the stored root.
    fn verify(&self) -> bool;
    /// Validates the MerkleProof and that it corresponds to the supplied node.
    ///
    /// TODO: audit performance and usage in case verification is
    /// unnecessary based on how it's used.
    fn validate(&self, node: usize) -> bool {
        if !self.verify() {
            return false;
        }
        node == self.path_index()
    }
    /// Verifies the proof and additionally checks that its leaf equals `data`.
    fn validate_data(&self, data: <Self::Hasher as Hasher>::Domain) -> bool {
        if !self.verify() {
            return false;
        }
        self.leaf() == data
    }
    /// The leaf value this proof attests to.
    fn leaf(&self) -> <Self::Hasher as Hasher>::Domain;
    /// The root the proof is verified against.
    fn root(&self) -> <Self::Hasher as Hasher>::Domain;
    fn len(&self) -> usize;
    /// Per-level `(sibling hashes, insertion index)` pairs, leaf to root.
    fn path(&self) -> Vec<(Vec<<Self::Hasher as Hasher>::Domain>, usize)>;
    /// Leaf index proven: folds the per-level indices (least significant
    /// first in `path()`, hence the `rev()`) as base-`Arity` digits.
    fn path_index(&self) -> usize {
        self.path()
            .iter()
            .rev()
            .fold(0, |acc, (_, index)| (acc * Self::Arity::to_usize()) + index)
    }
    /// True when this proof is for the given challenge (leaf index).
    fn proves_challenge(&self, challenge: usize) -> bool {
        self.path_index() == challenge
    }
    /// Calculates the expected length of the full path, given the number of leaves in the base layer.
    fn expected_len(&self, leaves: usize) -> usize {
        compound_path_length::<Self::Arity, Self::SubTreeArity, Self::TopTreeArity>(leaves)
    }
}
/// Length of the base-layer Merkle path for a tree with `leaves` leaves in
/// total, where `A` is the base arity, `B` the sub-tree arity and `C` the
/// top-tree arity. For compound trees the leaves are first divided among
/// the sub/top trees, shrinking the per-base-tree leaf count.
///
/// NOTE(review): when `C > 0` this also divides by `B`, so a configuration
/// with a top arity but zero sub arity would divide by zero — confirm
/// callers never instantiate `C > 0, B == 0`.
pub fn base_path_length<A: Unsigned, B: Unsigned, C: Unsigned>(leaves: usize) -> usize {
    let leaves = if C::to_usize() > 0 {
        leaves / C::to_usize() / B::to_usize()
    } else if B::to_usize() > 0 {
        leaves / B::to_usize()
    } else {
        leaves
    };
    graph_height::<A>(leaves) - 1
}
/// Total Merkle path length across all layers: the base-layer path plus
/// one extra step for the sub-tree layer (if `B > 0`) and one for the
/// top-tree layer (if `C > 0`).
pub fn compound_path_length<A: Unsigned, B: Unsigned, C: Unsigned>(leaves: usize) -> usize {
    // Each non-empty compound layer adds exactly one path element.
    let extra_layers = [B::to_usize(), C::to_usize()]
        .iter()
        .filter(|&&arity| arity > 0)
        .count();
    base_path_length::<A, B, C>(leaves) + extra_layers
}
/// Height of a compound tree: the base-layer height
/// (`graph_height::<A>(leaves) - 1`) plus `B - 1` when a sub-tree layer
/// is present (`B > 0`) and `C - 1` when a top-tree layer is present
/// (`C > 0`).
pub fn compound_tree_height<A: Unsigned, B: Unsigned, C: Unsigned>(leaves: usize) -> usize {
    // `saturating_sub(1)` maps 0 -> 0 and n -> n - 1, exactly matching the
    // original `if n > 0 { n - 1 } else { 0 }` branches.
    let base = graph_height::<A>(leaves) - 1;
    let sub = B::to_usize().saturating_sub(1);
    let top = C::to_usize().saturating_sub(1);
    base + sub + top
}
/// Dispatches a method call to whichever variant a `ProofData` currently
/// holds (`Single`, `Sub` or `Top`). Two arms: zero-argument calls and
/// calls forwarding one or more arguments.
macro_rules! forward_method {
    ($caller:expr, $name:ident) => {
        match $caller {
            ProofData::Single(ref proof) => proof.$name(),
            ProofData::Sub(ref proof) => proof.$name(),
            ProofData::Top(ref proof) => proof.$name(),
        }
    };
    ($caller:expr, $name:ident, $( $args:expr ),+) => {
        match $caller {
            ProofData::Single(ref proof) => proof.$name($($args),+),
            ProofData::Sub(ref proof) => proof.$name($($args),+),
            ProofData::Top(ref proof) => proof.$name($($args),+),
        }
    };
}
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct InclusionPath<H: Hasher, Arity: PoseidonArity> {
#[serde(bound(
serialize = "H::Domain: Serialize",
deserialize = "H::Domain: Deserialize<'de>"
))]
path: Vec<PathElement<H, Arity>>,
}
impl<H: Hasher, Arity: PoseidonArity> From<Vec<PathElement<H, Arity>>> for InclusionPath<H, Arity> {
fn from(path: Vec<PathElement<H, Arity>>) -> Self {
Self { path }
}
}
impl<H: Hasher, Arity: PoseidonArity> InclusionPath<H, Arity> {
    /// Calculate the root of this path, given the leaf as input.
    ///
    /// Walks up level by level: at each level the running hash is inserted
    /// among its siblings at the element's `index`, and all children are
    /// hashed together via `multi_node`. The fold's accumulator is the
    /// running hash; its final value is the root.
    pub fn root(&self, leaf: H::Domain) -> H::Domain {
        let mut a = H::Function::default();
        (0..self.path.len()).fold(leaf, |h, height| {
            a.reset();
            let index = self.path[height].index;
            let mut nodes = self.path[height].hashes.clone();
            nodes.insert(index, h);
            a.multi_node(&nodes, height)
        })
    }
    /// Number of path elements (levels) in this path.
    pub fn len(&self) -> usize {
        self.path.len()
    }
    pub fn is_empty(&self) -> bool {
        self.path.is_empty()
    }
    pub fn iter(&self) -> Iter<'_, PathElement<H, Arity>> {
        self.path.iter()
    }
    /// Leaf index proven by this path: per-level child indices are stored
    /// least significant first, so fold from the top (`rev`) treating them
    /// as base-`Arity` digits.
    pub fn path_index(&self) -> usize {
        self.path
            .iter()
            .rev()
            .fold(0, |acc, p| (acc * Arity::to_usize()) + p.index)
    }
}
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct PathElement<H: Hasher, Arity: PoseidonArity> {
#[serde(bound(
serialize = "H::Domain: Serialize",
deserialize = "H::Domain: Deserialize<'de>"
))]
hashes: Vec<H::Domain>,
index: usize,
#[serde(skip)]
_arity: PhantomData<Arity>,
}
/// Representation of a merkle proof.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MerkleProof<
H: Hasher,
BaseArity: PoseidonArity,
SubTreeArity: PoseidonArity = U0,
TopTreeArity: PoseidonArity = U0,
> {
#[serde(bound(
serialize = "H::Domain: Serialize",
deserialize = "H::Domain: Deserialize<'de>"
))]
data: ProofData<H, BaseArity, SubTreeArity, TopTreeArity>,
}
impl<
H: Hasher,
Arity: 'static + PoseidonArity,
SubTreeArity: 'static + PoseidonArity,
TopTreeArity: 'static + PoseidonArity,
> MerkleProofTrait for MerkleProof<H, Arity, SubTreeArity, TopTreeArity>
{
type Hasher = H;
type Arity = Arity;
type SubTreeArity = SubTreeArity;
type TopTreeArity = TopTreeArity;
fn try_from_proof(
p: merkletree::proof::Proof<<Self::Hasher as Hasher>::Domain, Self::Arity>,
) -> Result<Self> {
if p.top_layer_nodes() > 0 {
Ok(MerkleProof {
data: ProofData::Top(TopProof::try_from_proof(p)?),
})
} else if p.sub_layer_nodes() > 0 {
Ok(MerkleProof {
data: ProofData::Sub(SubProof::try_from_proof(p)?),
})
} else {
Ok(MerkleProof {
data: ProofData::Single(SingleProof::try_from_proof(p)?),
})
}
}
fn verify(&self) -> bool {
forward_method!(self.data, verify)
}
fn leaf(&self) -> H::Domain {
forward_method!(self.data, leaf)
}
fn root(&self) -> H::Domain {
forward_method!(self.data, root)
}
fn len(&self) -> usize {
forward_method!(self.data, len)
}
fn path(&self) -> Vec<(Vec<H::Domain>, usize)> {
forward_method!(self.data, path)
}
fn path_index(&self) -> usize {
forward_method!(self.data, path_index)
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
enum ProofData<
H: Hasher,
BaseArity: PoseidonArity,
SubTreeArity: PoseidonArity,
TopTreeArity: PoseidonArity,
> {
#[serde(bound(
serialize = "H::Domain: Serialize",
deserialize = "H::Domain: Deserialize<'de>"
))]
Single(SingleProof<H, BaseArity>),
#[serde(bound(
serialize = "H::Domain: Serialize",
deserialize = "H::Domain: Deserialize<'de>"
))]
Sub(SubProof<H, BaseArity, SubTreeArity>),
#[serde(bound(
serialize = "H::Domain: Serialize",
deserialize = "H::Domain: Deserialize<'de>"
))]
Top(TopProof<H, BaseArity, SubTreeArity, TopTreeArity>),
}
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
struct SingleProof<H: Hasher, Arity: PoseidonArity> {
/// Root of the merkle tree.
#[serde(bound(
serialize = "H::Domain: Serialize",
deserialize = "H::Domain: Deserialize<'de>"
))]
root: H::Domain,
/// The original leaf data for this prof.
#[serde(bound(
serialize = "H::Domain: Serialize",
deserialize = "H::Domain: Deserialize<'de>"
))]
leaf: H::Domain,
/// The path from leaf to root.
#[serde(bound(
serialize = "H::Domain: Serialize",
deserialize = "H::Domain: Deserialize<'de>"
))]
path: InclusionPath<H, Arity>,
}
impl<H: Hasher, Arity: PoseidonArity> SingleProof<H, Arity> {
pub fn new(path: InclusionPath<H, Arity>, root: H::Domain, leaf: H::Domain) -> Self {
SingleProof { root, leaf, path }
}
}
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
struct SubProof<H: Hasher, BaseArity: PoseidonArity, SubTreeArity: PoseidonArity> {
#[serde(bound(
serialize = "H::Domain: Serialize",
deserialize = "H::Domain: Deserialize<'de>"
))]
base_proof: InclusionPath<H, BaseArity>,
#[serde(bound(
serialize = "H::Domain: Serialize",
deserialize = "H::Domain: Deserialize<'de>"
))]
sub_proof: InclusionPath<H, SubTreeArity>,
/// Root of the merkle tree.
#[serde(bound(
serialize = "H::Domain: Serialize",
deserialize = "H::Domain: Deserialize<'de>"
))]
root: H::Domain,
/// The original leaf data for this prof.
#[serde(bound(
serialize = "H::Domain: Serialize",
deserialize = "H::Domain: Deserialize<'de>"
))]
leaf: H::Domain,
}
impl<H: Hasher, BaseArity: PoseidonArity, SubTreeArity: PoseidonArity>
SubProof<H, BaseArity, SubTreeArity>
{
pub fn new(
base_proof: InclusionPath<H, BaseArity>,
sub_proof: InclusionPath<H, SubTreeArity>,
root: H::Domain,
leaf: H::Domain,
) -> Self {
Self {
base_proof,
sub_proof,
root,
leaf,
}
}
}
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
struct TopProof<
H: Hasher,
BaseArity: PoseidonArity,
SubTreeArity: PoseidonArity,
TopTreeArity: PoseidonArity,
> {
#[serde(bound(
serialize = "H::Domain: Serialize",
deserialize = "H::Domain: Deserialize<'de>"
))]
base_proof: InclusionPath<H, BaseArity>,
#[serde(bound(
serialize = "H::Domain: Serialize",
deserialize = "H::Domain: Deserialize<'de>"
))]
sub_proof: InclusionPath<H, SubTreeArity>,
#[serde(bound(
serialize = "H::Domain: Serialize",
deserialize = "H::Domain: Deserialize<'de>"
))]
top_proof: InclusionPath<H, TopTreeArity>,
/// Root of the merkle tree.
#[serde(bound(
serialize = "H::Domain: Serialize",
deserialize = "H::Domain: Deserialize<'de>"
))]
root: H::Domain,
/// The original leaf data for this prof.
#[serde(bound(
serialize = "H::Domain: Serialize",
deserialize = "H::Domain: Deserialize<'de>"
))]
leaf: H::Domain,
}
impl<
H: Hasher,
BaseArity: PoseidonArity,
SubTreeArity: PoseidonArity,
TopTreeArity: PoseidonArity,
> TopProof<H, BaseArity, SubTreeArity, TopTreeArity>
{
pub fn new(
base_proof: InclusionPath<H, BaseArity>,
sub_proof: InclusionPath<H, SubTreeArity>,
top_proof: InclusionPath<H, TopTreeArity>,
root: H::Domain,
leaf: H::Domain,
) -> Self {
Self {
base_proof,
sub_proof,
top_proof,
root,
leaf,
}
}
}
impl<
H: Hasher,
BaseArity: PoseidonArity,
SubTreeArity: PoseidonArity,
TopTreeArity: PoseidonArity,
> MerkleProof<H, BaseArity, SubTreeArity, TopTreeArity>
{
pub fn new(n: usize) -> Self {
let root = Default::default();
let leaf = Default::default();
let path_elem = PathElement {
hashes: vec![Default::default(); BaseArity::to_usize()],
index: 0,
_arity: Default::default(),
};
let path = vec![path_elem; n];
MerkleProof {
data: ProofData::Single(SingleProof::new(path.into(), root, leaf)),
}
}
}
/// Converts a merkletree proof into a `SingleProof`.
///
/// `lemma_start_index` selects where the sibling hashes begin in the
/// lemma (1 for base proofs, which store the leaf first; 0 for sub/top
/// proofs). When `sub_root` is given it is used as the proof's leaf
/// (the sub-tree's root), otherwise the proof's own item is used.
fn proof_to_single<H: Hasher, Arity: PoseidonArity, TargetArity: PoseidonArity>(
    proof: &merkletree::proof::Proof<H::Domain, Arity>,
    lemma_start_index: usize,
    sub_root: Option<H::Domain>,
) -> SingleProof<H, TargetArity> {
    let leaf = sub_root.unwrap_or_else(|| proof.item());
    let path = extract_path::<H, TargetArity>(proof.lemma(), proof.path(), lemma_start_index);
    SingleProof::new(path, proof.root(), leaf)
}
/// 'lemma_start_index' is required because sub/top proofs start at
/// index 0 and base proofs start at index 1 (skipping the leaf at the
/// front)
fn extract_path<H: Hasher, Arity: PoseidonArity>(
lemma: &[H::Domain],
path: &[usize],
lemma_start_index: usize,
) -> InclusionPath<H, Arity> {
let path = lemma[lemma_start_index..lemma.len() - 1]
.chunks(Arity::to_usize() - 1)
.zip(path.iter())
.map(|(hashes, index)| PathElement {
hashes: hashes.to_vec(),
index: *index,
_arity: Default::default(),
})
.collect::<Vec<_>>();
path.into()
}
impl<H: Hasher, Arity: 'static + PoseidonArity> SingleProof<H, Arity> {
    // Lemma start index 1 skips the leaf stored at the lemma's front.
    fn try_from_proof(p: merkletree::proof::Proof<<H as Hasher>::Domain, Arity>) -> Result<Self> {
        Ok(proof_to_single(&p, 1, None))
    }
    // Recompute the root from leaf + path and compare with the stored root.
    fn verify(&self) -> bool {
        let calculated_root = self.path.root(self.leaf);
        self.root == calculated_root
    }
    fn leaf(&self) -> H::Domain {
        self.leaf
    }
    fn root(&self) -> H::Domain {
        self.root
    }
    // `Arity - 1` sibling hashes per level, plus the leaf and the root.
    fn len(&self) -> usize {
        self.path.len() * (Arity::to_usize() - 1) + 2
    }
    fn path(&self) -> Vec<(Vec<H::Domain>, usize)> {
        self.path
            .iter()
            .map(|x| (x.hashes.clone(), x.index))
            .collect::<Vec<_>>()
    }
    fn path_index(&self) -> usize {
        self.path.path_index()
    }
}
impl<H: Hasher, Arity: 'static + PoseidonArity, SubTreeArity: 'static + PoseidonArity>
    SubProof<H, Arity, SubTreeArity>
{
    /// Converts a compound merkletree proof (base + sub layer) into a
    /// `SubProof`. Fails when the proof's sub-layer arity does not match
    /// `SubTreeArity` or when it carries no base proof.
    fn try_from_proof(p: merkletree::proof::Proof<<H as Hasher>::Domain, Arity>) -> Result<Self> {
        ensure!(
            p.sub_layer_nodes() == SubTreeArity::to_usize(),
            "sub arity mismatch"
        );
        ensure!(
            p.sub_tree_proof.is_some(),
            "Cannot generate sub proof without a base-proof"
        );
        let base_p = p.sub_tree_proof.as_ref().expect("proof as_ref failure");
        // Generate SubProof
        let root = p.root();
        let leaf = base_p.item();
        // Base lemmas start at index 1 (index 0 is the leaf itself); the
        // sub-layer lemmas start at index 0.
        let base_proof = extract_path::<H, Arity>(base_p.lemma(), base_p.path(), 1);
        let sub_proof = extract_path::<H, SubTreeArity>(p.lemma(), p.path(), 0);
        Ok(SubProof::new(base_proof, sub_proof, root, leaf))
    }
    /// Recomputes the root through both layers and compares it with the
    /// stored root.
    fn verify(&self) -> bool {
        let sub_leaf = self.base_proof.root(self.leaf);
        let calculated_root = self.sub_proof.root(sub_leaf);
        self.root == calculated_root
    }
    fn leaf(&self) -> H::Domain {
        self.leaf
    }
    fn root(&self) -> H::Domain {
        self.root
    }
    /// Number of nodes in the sub-tree layer.
    fn len(&self) -> usize {
        SubTreeArity::to_usize()
    }
    /// Full path: base-layer elements followed by sub-layer elements.
    fn path(&self) -> Vec<(Vec<H::Domain>, usize)> {
        let base = self.base_proof.iter().map(|x| (x.hashes.clone(), x.index));
        let sub = self.sub_proof.iter().map(|x| (x.hashes.clone(), x.index));
        base.chain(sub).collect()
    }
    /// Leaf index proven by this proof: the sub-layer index selects one of
    /// the base trees, each holding `Arity^base_len` leaves.
    fn path_index(&self) -> usize {
        // `usize::pow` replaces the previous manual repeated-multiply loop.
        let base_proof_leaves = Arity::to_usize().pow(self.base_proof.len() as u32);
        let sub_proof_index = self.sub_proof.path_index();
        (sub_proof_index * base_proof_leaves) + self.base_proof.path_index()
    }
}
impl<
    H: Hasher,
    Arity: 'static + PoseidonArity,
    SubTreeArity: 'static + PoseidonArity,
    TopTreeArity: 'static + PoseidonArity,
> TopProof<H, Arity, SubTreeArity, TopTreeArity>
{
    /// Converts a compound merkletree proof (base + sub + top layers) into
    /// a `TopProof`. Fails on a top/sub arity mismatch or when either the
    /// sub-proof or the base-proof is missing.
    fn try_from_proof(p: merkletree::proof::Proof<<H as Hasher>::Domain, Arity>) -> Result<Self> {
        ensure!(
            p.top_layer_nodes() == TopTreeArity::to_usize(),
            "top arity mismatch"
        );
        ensure!(
            p.sub_layer_nodes() == SubTreeArity::to_usize(),
            "sub arity mismatch"
        );
        ensure!(
            p.sub_tree_proof.is_some(),
            "Cannot generate top proof without a sub-proof"
        );
        let sub_p = p.sub_tree_proof.as_ref().expect("proofs as ref failure");
        ensure!(
            sub_p.sub_tree_proof.is_some(),
            "Cannot generate top proof without a base-proof"
        );
        let base_p = sub_p
            .sub_tree_proof
            .as_ref()
            .expect("proofs as ref failure");
        let root = p.root();
        let leaf = base_p.item();
        // Base lemmas start at index 1 (index 0 is the leaf itself); the
        // sub- and top-layer lemmas start at index 0.
        let base_proof = extract_path::<H, Arity>(base_p.lemma(), base_p.path(), 1);
        let sub_proof = extract_path::<H, SubTreeArity>(sub_p.lemma(), sub_p.path(), 0);
        let top_proof = extract_path::<H, TopTreeArity>(p.lemma(), p.path(), 0);
        Ok(TopProof::new(base_proof, sub_proof, top_proof, root, leaf))
    }
    /// Recomputes the root through all three layers and compares it with
    /// the stored root.
    fn verify(&self) -> bool {
        let sub_leaf = self.base_proof.root(self.leaf);
        let top_leaf = self.sub_proof.root(sub_leaf);
        let calculated_root = self.top_proof.root(top_leaf);
        self.root == calculated_root
    }
    fn leaf(&self) -> H::Domain {
        self.leaf
    }
    fn root(&self) -> H::Domain {
        self.root
    }
    /// Number of nodes in the top-tree layer.
    fn len(&self) -> usize {
        TopTreeArity::to_usize()
    }
    /// Full path: base-layer, then sub-layer, then top-layer elements.
    fn path(&self) -> Vec<(Vec<H::Domain>, usize)> {
        let base = self.base_proof.iter().map(|x| (x.hashes.clone(), x.index));
        let sub = self.sub_proof.iter().map(|x| (x.hashes.clone(), x.index));
        let top = self.top_proof.iter().map(|x| (x.hashes.clone(), x.index));
        base.chain(sub).chain(top).collect()
    }
    /// Leaf index proven by this proof. Each base tree holds
    /// `Arity^base_len` leaves and each sub tree holds `SubTreeArity` base
    /// trees; the sub/top indices select the tree, the base index selects
    /// the leaf within it.
    fn path_index(&self) -> usize {
        // `usize::pow` replaces the previous manual repeated-multiply loop.
        let base_proof_leaves = Arity::to_usize().pow(self.base_proof.len() as u32);
        let sub_proof_leaves = base_proof_leaves * SubTreeArity::to_usize();
        let sub_proof_index = self.sub_proof.path_index();
        let top_proof_index = self.top_proof.path_index();
        (sub_proof_index * base_proof_leaves)
            + (top_proof_index * sub_proof_leaves)
            + self.base_proof.path_index()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use filecoin_hashers::{
        blake2s::Blake2sHasher, poseidon::PoseidonHasher, sha256::Sha256Hasher, Domain,
    };
    use generic_array::typenum::{U2, U4, U8};
    use rand::thread_rng;
    use crate::merkle::{
        generate_tree, get_base_tree_count, DiskStore, MerkleTreeTrait, MerkleTreeWrapper,
    };

    /// Builds a random tree for `Tree`, then checks that every leaf's
    /// generated Merkle proof verifies, validates against its leaf index,
    /// and validates against the original leaf bytes.
    fn merklepath<Tree: 'static + MerkleTreeTrait>() {
        // Leaf width in bytes; the generated `data` is `nodes * node_size` long.
        let node_size = 32;
        let nodes = 64 * get_base_tree_count::<Tree>();
        let mut rng = thread_rng();
        let (data, tree) = generate_tree::<Tree, _>(&mut rng, nodes, None);
        for i in 0..nodes {
            let proof = tree.gen_proof(i).expect("gen_proof failure");
            assert!(proof.verify(), "failed to validate");
            assert!(proof.validate(i), "failed to validate valid merkle path");
            let data_slice = &data[i * node_size..(i + 1) * node_size].to_vec();
            assert!(
                proof.validate_data(
                    <Tree::Hasher as Hasher>::Domain::try_from_bytes(data_slice)
                        .expect("try from bytes failure")
                ),
                "failed to validate valid data"
            );
        }
    }

    /// Expands to one `#[test]` that runs `merklepath` over a
    /// `MerkleTreeWrapper` with the given hasher and (base, sub, top)
    /// arity configuration. Replaces thirteen hand-written, identical
    /// test bodies with a single definition per tree shape.
    macro_rules! merklepath_test {
        ($name:ident, $hasher:ty, $base:ty, $sub:ty, $top:ty) => {
            #[test]
            fn $name() {
                merklepath::<
                    MerkleTreeWrapper<
                        $hasher,
                        DiskStore<<$hasher as Hasher>::Domain>,
                        $base,
                        $sub,
                        $top,
                    >,
                >();
            }
        };
    }

    merklepath_test!(merklepath_poseidon_2, PoseidonHasher, U2, U0, U0);
    merklepath_test!(merklepath_poseidon_4, PoseidonHasher, U4, U0, U0);
    merklepath_test!(merklepath_poseidon_8, PoseidonHasher, U8, U0, U0);
    merklepath_test!(merklepath_poseidon_8_2, PoseidonHasher, U8, U2, U0);
    merklepath_test!(merklepath_poseidon_8_4, PoseidonHasher, U8, U4, U0);
    merklepath_test!(merklepath_poseidon_8_4_2, PoseidonHasher, U8, U4, U2);
    merklepath_test!(merklepath_sha256_2, Sha256Hasher, U2, U0, U0);
    merklepath_test!(merklepath_sha256_4, Sha256Hasher, U4, U0, U0);
    merklepath_test!(merklepath_sha256_2_4, Sha256Hasher, U2, U4, U0);
    merklepath_test!(merklepath_sha256_top_2_4_2, Sha256Hasher, U2, U4, U2);
    merklepath_test!(merklepath_blake2s_2, Blake2sHasher, U2, U0, U0);
    merklepath_test!(merklepath_blake2s_4, Blake2sHasher, U4, U0, U0);
    merklepath_test!(merklepath_blake2s_8_4_2, Blake2sHasher, U8, U4, U2);
}
| 27.691713 | 101 | 0.536531 |
031d38a13ef2a6702d70ecb1169cc0d4496baa18 | 165 | mod access;
// Voxel world building blocks; `mod access;` is declared at the top of this file.
mod block;
mod chunk;
mod chunks;
mod space;
// Re-export the public surface so callers use this module as a flat namespace.
pub use access::VoxelAccess;
pub use block::*;
pub use chunk::*;
pub use chunks::Chunks;
pub use space::*;
| 13.75 | 28 | 0.70303 |
90e7db7dbbb3fb6b7aff2a6f97c6b0ec5e3a897c | 797 | // xfail-fast
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use cmp::{Eq, Ord};
use num::Num::from_int;
extern mod std;
use std::cmp::FuzzyEq;
// NOTE(review): pre-1.0 Rust compile test — supertraits were listed
// space-separated (`Num Eq Ord`) rather than with `+`. Kept as-is; this
// dialect does not build on a modern toolchain.
pub trait NumExt: Num Eq Ord {}
// Extends NumExt with approximate float comparison from std::cmp::FuzzyEq.
pub trait FloatExt: NumExt FuzzyEq {}
// Exercises the combined trait bounds: compares `*n` against the numeric 1.
fn greater_than_one<T:NumExt>(n: &T) -> bool { *n > from_int(1) }
fn greater_than_one_float<T:FloatExt>(n: &T) -> bool { *n > from_int(1) }
// Empty main: the test only has to compile and run without panicking.
pub fn main() {}
| 29.518519 | 73 | 0.706399 |
bf7b137640e76bc22b0d6dd3deab25451e498d55 | 4,850 | use super::{Open, Sink};
use crate::audio::AudioPacket;
use alsa::device_name::HintIter;
use alsa::pcm::{Access, Format, Frames, HwParams, PCM};
use alsa::{Direction, Error, ValueOr};
use std::cmp::min;
use std::ffi::CString;
use std::io;
use std::process::exit;
// Target ALSA period length in frames; at the 44100 Hz rate configured in
// `open_device` this is roughly 125 ms per period.
const PREFERED_PERIOD_SIZE: Frames = 5512; // Period of roughly 125ms
// Device ring buffer is sized to hold this many periods (see `open_device`).
const BUFFERED_PERIODS: Frames = 4;

/// Audio sink that plays 16-bit interleaved stereo PCM through ALSA.
pub struct AlsaSink {
    // Open PCM handle; `None` until `start` succeeds and again after `stop`.
    pcm: Option<PCM>,
    // ALSA device name, e.g. "default" or an entry from `aplay -L`.
    device: String,
    // Staging buffer of i16 samples, flushed one full period at a time.
    buffer: Vec<i16>,
}
/// Prints every playback-capable ALSA device of the "pcm", "ctl" and
/// "hwdep" classes to stdout, formatted like `aplay -L`.
fn list_outputs() {
    for kind in &["pcm", "ctl", "hwdep"] {
        println!("{} devices:", kind);

        let class = CString::new(*kind).unwrap();
        for hint in HintIter::new(None, &class).unwrap() {
            // Only playback-capable entries are relevant for an output sink.
            if let Some(Direction::Playback) = hint.direction {
                // mimic aplay -L: name, then the description indented one tab
                let desc = hint.desc.unwrap().replace("\n", "\n\t");
                println!("{}\n\t{}\n", hint.name.unwrap(), desc);
            }
        }
    }
}
// Opens `dev_name` for playback and configures it for 44100 Hz / stereo /
// 16-bit interleaved audio. Returns the PCM handle together with the
// period size (in frames) actually granted by the driver, which may differ
// from PREFERED_PERIOD_SIZE.
fn open_device(dev_name: &str) -> Result<(PCM, Frames), Box<Error>> {
    let pcm = PCM::new(dev_name, Direction::Playback, false)?;
    let mut period_size = PREFERED_PERIOD_SIZE;
    // http://www.linuxjournal.com/article/6735?page=0,1#N0x19ab2890.0x19ba78d8
    // latency = period_size * periods / (rate * bytes_per_frame)
    // For 16 Bit stereo data, one frame has a length of four bytes.
    // 500ms = buffer_size / (44100 * 4)
    // buffer_size_bytes = 0.5 * 44100 / 4
    // buffer_size_frames = 0.5 * 44100 = 22050
    {
        // Set hardware parameters: 44100 Hz / Stereo / 16 bit
        // NOTE: hw params must be committed (`pcm.hw_params`) before the
        // sw params below can be derived from the granted sizes.
        let hwp = HwParams::any(&pcm)?;
        hwp.set_access(Access::RWInterleaved)?;
        hwp.set_format(Format::s16())?;
        hwp.set_rate(44100, ValueOr::Nearest)?;
        hwp.set_channels(2)?;
        // Ask for at least the preferred period; keep whatever the driver grants.
        period_size = hwp.set_period_size_near(period_size, ValueOr::Greater)?;
        hwp.set_buffer_size_near(period_size * BUFFERED_PERIODS)?;
        pcm.hw_params(&hwp)?;
        let swp = pcm.sw_params_current()?;
        // Start playback once the buffer is one period short of full.
        swp.set_start_threshold(hwp.get_buffer_size()? - hwp.get_period_size()?)?;
        pcm.sw_params(&swp)?;
    }
    Ok((pcm, period_size))
}
impl Open for AlsaSink {
    /// Creates an unstarted sink for the given ALSA device.
    ///
    /// `Some("?")` lists the available outputs and exits the process;
    /// `None` falls back to the "default" device.
    fn open(device: Option<String>) -> AlsaSink {
        info!("Using alsa sink");

        let name = device.unwrap_or_else(|| "default".to_string());
        if name == "?" {
            println!("Listing available alsa outputs");
            list_outputs();
            exit(0);
        }

        AlsaSink {
            pcm: None,
            device: name,
            buffer: vec![],
        }
    }
}
impl Sink for AlsaSink {
    // Opens the PCM device (idempotent: a no-op if already started) and
    // allocates the staging buffer. Capacity is `period_size * 2` i16
    // samples because the device is configured for 2 channels, so one
    // frame contributes two samples.
    fn start(&mut self) -> io::Result<()> {
        if self.pcm.is_none() {
            let pcm = open_device(&self.device);
            match pcm {
                Ok((p, period_size)) => {
                    self.pcm = Some(p);
                    // Create a buffer for all samples for a full period
                    self.buffer = Vec::with_capacity((period_size * 2) as usize);
                }
                Err(e) => {
                    error!("Alsa error PCM open {}", e);
                    return Err(io::Error::new(
                        io::ErrorKind::Other,
                        "Alsa error: PCM open failed",
                    ));
                }
            }
        }
        Ok(())
    }
    // Flushes the partially filled staging buffer, drains the device, and
    // drops the PCM handle. NOTE(review): panics (unwrap on None) if
    // called before a successful `start` — verify callers uphold this.
    fn stop(&mut self) -> io::Result<()> {
        {
            let pcm = self.pcm.as_mut().unwrap();
            // Write any leftover data in the period buffer
            // before draining the actual buffer
            let io = pcm.io_i16().unwrap();
            match io.writei(&self.buffer[..]) {
                Ok(_) => (),
                Err(err) => pcm.try_recover(err, false).unwrap(),
            }
            pcm.drain().unwrap();
        }
        self.pcm = None;
        Ok(())
    }
    // Copies the packet's samples into the staging buffer, writing one
    // full period to ALSA each time the buffer fills; leftovers stay
    // buffered for the next call (or for `stop`).
    fn write(&mut self, packet: &AudioPacket) -> io::Result<()> {
        let mut processed_data = 0;
        let data = packet.samples();
        while processed_data < data.len() {
            // Take as much as fits in the remainder of the current period.
            let data_to_buffer = min(
                self.buffer.capacity() - self.buffer.len(),
                data.len() - processed_data,
            );
            self.buffer
                .extend_from_slice(&data[processed_data..processed_data + data_to_buffer]);
            processed_data += data_to_buffer;
            if self.buffer.len() == self.buffer.capacity() {
                let pcm = self.pcm.as_mut().unwrap();
                let io = pcm.io_i16().unwrap();
                // On write failure (e.g. underrun), ask ALSA to recover.
                match io.writei(&self.buffer) {
                    Ok(_) => (),
                    Err(err) => pcm.try_recover(err, false).unwrap(),
                }
                self.buffer.clear();
            }
        }
        Ok(())
    }
}
| 31.699346 | 91 | 0.501031 |
ddc4372bda10ac3571ea2031a27063beed191393 | 4,022 | // use cached::proc_macro::cached;
use log::{info, trace};
use std::collections::BTreeSet;
// use std::collections::{HashMap};
use std::env;
use std::io::{stdin, BufRead};
// use std::vec::Vec;
// extern crate cached;
// #[macro_use]
// extern crate cached;
/// Reads one integer per line from stdin and runs part 1, unless the
/// `PART2` environment variable is set to a non-empty value.
fn main() {
    env_logger::init();
    trace!("starting up...");

    let stdin = stdin();
    let mut nums: BTreeSet<i64> = BTreeSet::new();
    for line in stdin.lock().lines() {
        nums.insert(line.unwrap().parse().unwrap());
    }
    info!("got nums = {:?}", nums);

    // Unset or empty PART2 means "run part 1".
    let part2_requested = !env::var("PART2").unwrap_or_default().is_empty();
    if part2_requested {
        println!("Part 2 is solved in another castle... (see Python code)");
    } else {
        part1(&nums);
    }
}
/// AoC 2020 day 10 part 1: counts the 1-jolt and 3-jolt gaps along the
/// full adapter chain (outlet at 0 jolts, device 3 jolts above the max)
/// and prints both counts and their product.
fn part1(nums: &BTreeSet<i64>) {
    let max = *nums.iter().max().unwrap();
    let mut ones = 0;
    let mut threes = 0;

    // Walk the adapters from highest to lowest; the first element of the
    // descending iterator is `max` itself, so it is skipped.
    let mut prev = max;
    for &n in nums.iter().rev().skip(1) {
        let gap = prev - n;
        info!("{} - {} = {}", prev, n, gap);
        if gap == 1 {
            ones += 1;
        } else if gap == 3 {
            threes += 1;
        }
        prev = n;
    }

    // Gap from the 0-jolt outlet up to the lowest adapter.
    if prev == 1 {
        ones += 1;
    } else if prev == 3 {
        threes += 1;
    }
    // The built-in device is always 3 jolts above the highest adapter.
    threes += 1;

    println!("DIFF_1 = {}", ones);
    println!("DIFF_3 = {}", threes);
    println!("DIFF_1 * DIFF_3 = {}", ones * threes);
}
/*
NOTE: I couldn't solve Part 2 in Rust because some sort of global memoization/caching needs to happen.
Rust makes that challenging (for my noob brain), so I practiced my Python for Part 2 instead.
fn part2(nums: &BTreeSet<i64>) {
let s: Vec<i64> = nums.iter().rev().map(|n| *n).collect();
println!("COUNT = {}", do_count(&s, 0, *nums.iter().max().unwrap()));
}
// use cached::SizedCache;
// use std::thread::sleep;
// use std::time::Duration;
// cached_key! {
// Key = { format!("{}-{}", idx, cur); };
// }
// #[cached()]
fn do_count(nums: &[i64], idx: usize, cur: i64) -> i64 {
return do_count2(nums, idx, cur);
}
struct P2Solver {
// nums:
nums: Vec<i64>,
memo: HashMap<(usize, i64), i64>
}
impl P2Solver {
fn slice(&self) -> &[i64] {
return &self.nums;
}
// fn new2(nums: BTreeSet<i64>) -> P2Solver {
// return P2Solver::new();
// }
fn count(&mut self, idx: usize, cur: i64) -> i64 {
if idx == self.slice().len() {
if cur <= 3 {
return 1;
} else {
return 0;
}
}
let mut sum = 0;
for (i, n) in self.slice()[idx..].iter().enumerate() {
let diff = cur - n;
let within_joltage = (0 <= diff && diff <= 3);
if !within_joltage {
continue;
}
let mut c = 0;
if self.memo.contains_key(&(i, *n)) {
c= *self.memo.get(&(i,*n)).unwrap();
} else {
c = self.count(i, *n);
// let x = self.memo.get_mut
// self.memo.insert((i, *n), c);
}
}
return sum;
}
}
fn do_count2(nums: &[i64], idx: usize, cur: i64) -> i64 {
trace!("examining cur = {}, idx = {}", cur, idx);
let ns = &nums[idx..];
if cur == 0 && ns.len() == 0 {
trace!("reached base case 1");
return 1;
}
if ns.len() == 0 {
trace!("ran out of things to check");
return 0;
}
let mut sum = 0;
for (i, n) in ns.iter().enumerate() {
let diff = cur - n;
if 1 <= diff && diff <= 3 {
let next = i + 1;
trace!(
"recursing on cur={} from diff={} with num length={}",
*n,
diff,
ns.len() - 1,
);
sum += do_count(&nums, next, *n);
}
if diff > 3 {
break;
}
}
return sum;
}
*/ | 22.723164 | 101 | 0.454749 |
1a1f6fd716dd3674f6743cdb86f8bdd5336b45f1 | 3,973 | use amethyst::{
ecs::prelude::Entity,
input::{is_close_requested, is_key_down},
prelude::*,
ui::{Anchor, UiEvent, UiEventType, UiFinder},
winit::VirtualKeyCode,
};
use tracing::{event, instrument, Level};
use crate::{about::AboutScreen, game::Game};
// UI ids used to create the buttons in `on_start` and look them up via
// `UiFinder` in `update`.
const BUTTON_START: &str = "start";
const BUTTON_ABOUT: &str = "about";

/// Main menu state: shows "Start Game" and "About" buttons and switches
/// to the matching state when one is clicked.
#[derive(Default, Debug)]
pub struct MainMenu {
    // Root entity of the menu UI; deleted in `on_stop`.
    ui_root: Option<Entity>,
    // Button entities, resolved lazily by `UiFinder` in `update`.
    button_start: Option<Entity>,
    button_about: Option<Entity>,
}
impl SimpleState for MainMenu {
    // Builds the menu UI: an anchor entity in the middle of the screen plus
    // one button per entry, stacked vertically around the center.
    #[instrument(skip(data), level = "info")]
    fn on_start(&mut self, data: StateData<'_, GameData<'_, '_>>) {
        let world = data.world;
        let menu = world
            .create_entity()
            .with(amethyst::ui::UiTransform::new(
                format!("menu"),
                Anchor::Middle,
                Anchor::Middle,
                0.,
                0.,
                0.,
                0.,
                0.,
            ))
            .build();
        let buttons = vec![("Start Game", BUTTON_START), ("About", BUTTON_ABOUT)];
        buttons.iter().enumerate().for_each(|(i, (text, id))| {
            crate::ui_scheme::Button::new(text, id).create(
                world,
                menu,
                amethyst::ui::UiTransform::new(
                    format!("{}_container", id),
                    Anchor::Middle,
                    Anchor::Middle,
                    0.,
                    // y offset: center the column of 150px-tall buttons with a
                    // 10px gap; button 0 ends up highest.
                    ((buttons.len() as f32) / 2. - 0.5) * 150. - (i as f32) * (150. + 10.),
                    0.,
                    0.,
                    0.,
                ),
            );
        });
        self.ui_root = Some(menu);
    }
    // Resolves the button entities by id once the UI has been created;
    // runs every frame but does nothing after both are found.
    #[instrument(skip(state_data), level = "info")]
    fn update(&mut self, state_data: &mut StateData<'_, GameData<'_, '_>>) -> SimpleTrans {
        // only search for buttons if they have not been found yet
        let StateData { world, .. } = state_data;
        if self.button_start.is_none() || self.button_about.is_none() {
            world.exec(|ui_finder: UiFinder<'_>| {
                self.button_start = ui_finder.find(&format!("{}", BUTTON_START));
                self.button_about = ui_finder.find(&format!("{}", BUTTON_ABOUT));
            });
        }
        Trans::None
    }
    // Window events: Escape or close request quits. UI click events:
    // switch to the About screen or the Game state.
    #[instrument(skip(_data), level = "info")]
    fn handle_event(
        &mut self,
        _data: StateData<'_, GameData<'_, '_>>,
        event: StateEvent,
    ) -> SimpleTrans {
        match event {
            StateEvent::Window(event) => {
                if is_close_requested(&event) || is_key_down(&event, VirtualKeyCode::Escape) {
                    event!(Level::INFO, "Quitting Application!");
                    Trans::Quit
                } else {
                    Trans::None
                }
            }
            StateEvent::Ui(UiEvent {
                event_type: UiEventType::Click,
                target,
            }) => {
                if Some(target) == self.button_about {
                    event!(Level::INFO, "Switching to AboutScreen!");
                    return Trans::Switch(Box::new(AboutScreen::default()));
                }
                if Some(target) == self.button_start {
                    event!(Level::INFO, "Switching to Game!");
                    return Trans::Switch(Box::new(Game::default()));
                }
                Trans::None
            }
            _ => Trans::None,
        }
    }
    // Tears down the menu UI and clears the cached entity handles.
    #[instrument(skip(data), level = "info")]
    fn on_stop(&mut self, data: StateData<GameData>) {
        // after destroying the current UI, invalidate references as well (makes things cleaner)
        if let Some(root_entity) = self.ui_root {
            data.world
                .delete_entity(root_entity)
                .expect("Failed to remove MainMenu");
        }
        self.ui_root = None;
        self.button_start = None;
        self.button_about = None;
    }
}
| 31.531746 | 96 | 0.486031 |
112525fcce96efe2a1762a6555f4ddee0a80daa5 | 29,025 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Parameterized string expansion
pub use self::Param::*;
use self::States::*;
use self::FormatState::*;
use self::FormatOp::*;
use std::ascii::OwnedAsciiExt;
use std::mem::replace;
use std::iter::repeat;
// Parser state for `expand`'s one-byte-at-a-time state machine. States
// carrying an `int` are accumulating a value (an integer constant or a
// nesting level while skipping a conditional branch).
#[derive(Copy, PartialEq)]
enum States {
    // Copying literal bytes; '%' switches to Percent.
    Nothing,
    // Just saw '%'; the next byte selects the operation.
    Percent,
    // %P: next byte names the variable to store into.
    SetVar,
    // %g: next byte names the variable to push.
    GetVar,
    // %p: next byte is the 1-indexed parameter digit.
    PushParam,
    // %': next byte is a character constant...
    CharConstant,
    // ...which must be followed by a closing quote.
    CharClose,
    // %{n}: accumulating a decimal integer until '}'.
    IntConstant(int),
    // Parsing a printf-style %:flags / width / precision pattern.
    FormatPattern(Flags, FormatState),
    // Skipping a false %? branch, looking for %e or %; at this depth.
    SeekIfElse(int),
    SeekIfElsePercent(int),
    // Skipping a taken %e branch, looking for the closing %;.
    SeekIfEnd(int),
    SeekIfEndPercent(int)
}
// Which part of a printf-style pattern is currently being parsed.
#[derive(Copy, PartialEq)]
enum FormatState {
    FormatStateFlags,
    FormatStateWidth,
    FormatStatePrecision
}
/// Types of parameters a capability can use
#[allow(missing_docs)]
#[derive(Clone)]
pub enum Param {
    Words(String),
    Number(int)
}
/// Container for static and dynamic variable arrays
pub struct Variables {
    /// Static variables A-Z
    sta: [Param; 26],
    /// Dynamic variables a-z
    dyn: [Param; 26]
}
impl Variables {
    /// Return a new zero-initialized Variables
    // The 26 elements are written out because `Param` is Clone but not
    // Copy, so the `[Number(0); 26]` repeat shorthand is unavailable.
    pub fn new() -> Variables {
        Variables {
            sta: [
                Number(0), Number(0), Number(0), Number(0), Number(0),
                Number(0), Number(0), Number(0), Number(0), Number(0),
                Number(0), Number(0), Number(0), Number(0), Number(0),
                Number(0), Number(0), Number(0), Number(0), Number(0),
                Number(0), Number(0), Number(0), Number(0), Number(0),
                Number(0),
            ],
            dyn: [
                Number(0), Number(0), Number(0), Number(0), Number(0),
                Number(0), Number(0), Number(0), Number(0), Number(0),
                Number(0), Number(0), Number(0), Number(0), Number(0),
                Number(0), Number(0), Number(0), Number(0), Number(0),
                Number(0), Number(0), Number(0), Number(0), Number(0),
                Number(0),
            ],
        }
    }
}
/// Expand a parameterized capability
///
/// # Arguments
/// * `cap`    - string to expand
/// * `params` - vector of params for %p1 etc
/// * `vars`   - Variables struct for %Pa etc
///
/// To be compatible with ncurses, `vars` should be the same between calls to `expand` for
/// multiple capabilities for the same terminal.
pub fn expand(cap: &[u8], params: &[Param], vars: &mut Variables)
              -> Result<Vec<u8> , String> {
    let mut state = Nothing;
    // expanded cap will only rarely be larger than the cap itself
    let mut output = Vec::with_capacity(cap.len());
    let mut stack: Vec<Param> = Vec::new();
    // Copy parameters into a local vector for mutability
    let mut mparams = [
        Number(0), Number(0), Number(0), Number(0), Number(0),
        Number(0), Number(0), Number(0), Number(0),
    ];
    for (dst, src) in mparams.iter_mut().zip(params.iter()) {
        *dst = (*src).clone();
    }
    // One byte at a time: each iteration dispatches on the current state,
    // possibly pushing/popping the operand stack or emitting output bytes.
    for &c in cap {
        let cur = c as char;
        let mut old_state = state;
        match state {
            Nothing => {
                if cur == '%' {
                    state = Percent;
                } else {
                    output.push(c);
                }
            },
            Percent => {
                // The byte after '%' selects the operation.
                match cur {
                    '%' => { output.push(c); state = Nothing },
                    'c' => if stack.len() > 0 {
                        match stack.pop().unwrap() {
                            // if c is 0, use 0200 (128) for ncurses compatibility
                            Number(c) => {
                                output.push(if c == 0 {
                                    128u8
                                } else {
                                    c as u8
                                })
                            }
                            _ => return Err("a non-char was used with %c".to_string())
                        }
                    } else { return Err("stack is empty".to_string()) },
                    'p' => state = PushParam,
                    'P' => state = SetVar,
                    'g' => state = GetVar,
                    '\'' => state = CharConstant,
                    '{' => state = IntConstant(0),
                    'l' => if stack.len() > 0 {
                        match stack.pop().unwrap() {
                            Words(s) => stack.push(Number(s.len() as int)),
                            _ => return Err("a non-str was used with %l".to_string())
                        }
                    } else { return Err("stack is empty".to_string()) },
                    // Binary operators pop y then x and push the result.
                    '+' => if stack.len() > 1 {
                        match (stack.pop().unwrap(), stack.pop().unwrap()) {
                            (Number(y), Number(x)) => stack.push(Number(x + y)),
                            _ => return Err("non-numbers on stack with +".to_string())
                        }
                    } else { return Err("stack is empty".to_string()) },
                    '-' => if stack.len() > 1 {
                        match (stack.pop().unwrap(), stack.pop().unwrap()) {
                            (Number(y), Number(x)) => stack.push(Number(x - y)),
                            _ => return Err("non-numbers on stack with -".to_string())
                        }
                    } else { return Err("stack is empty".to_string()) },
                    '*' => if stack.len() > 1 {
                        match (stack.pop().unwrap(), stack.pop().unwrap()) {
                            (Number(y), Number(x)) => stack.push(Number(x * y)),
                            _ => return Err("non-numbers on stack with *".to_string())
                        }
                    } else { return Err("stack is empty".to_string()) },
                    '/' => if stack.len() > 1 {
                        match (stack.pop().unwrap(), stack.pop().unwrap()) {
                            (Number(y), Number(x)) => stack.push(Number(x / y)),
                            _ => return Err("non-numbers on stack with /".to_string())
                        }
                    } else { return Err("stack is empty".to_string()) },
                    'm' => if stack.len() > 1 {
                        match (stack.pop().unwrap(), stack.pop().unwrap()) {
                            (Number(y), Number(x)) => stack.push(Number(x % y)),
                            _ => return Err("non-numbers on stack with %".to_string())
                        }
                    } else { return Err("stack is empty".to_string()) },
                    '&' => if stack.len() > 1 {
                        match (stack.pop().unwrap(), stack.pop().unwrap()) {
                            (Number(y), Number(x)) => stack.push(Number(x & y)),
                            _ => return Err("non-numbers on stack with &".to_string())
                        }
                    } else { return Err("stack is empty".to_string()) },
                    '|' => if stack.len() > 1 {
                        match (stack.pop().unwrap(), stack.pop().unwrap()) {
                            (Number(y), Number(x)) => stack.push(Number(x | y)),
                            _ => return Err("non-numbers on stack with |".to_string())
                        }
                    } else { return Err("stack is empty".to_string()) },
                    '^' => if stack.len() > 1 {
                        match (stack.pop().unwrap(), stack.pop().unwrap()) {
                            (Number(y), Number(x)) => stack.push(Number(x ^ y)),
                            _ => return Err("non-numbers on stack with ^".to_string())
                        }
                    } else { return Err("stack is empty".to_string()) },
                    '=' => if stack.len() > 1 {
                        match (stack.pop().unwrap(), stack.pop().unwrap()) {
                            (Number(y), Number(x)) => stack.push(Number(if x == y { 1 }
                                                                        else { 0 })),
                            _ => return Err("non-numbers on stack with =".to_string())
                        }
                    } else { return Err("stack is empty".to_string()) },
                    '>' => if stack.len() > 1 {
                        match (stack.pop().unwrap(), stack.pop().unwrap()) {
                            (Number(y), Number(x)) => stack.push(Number(if x > y { 1 }
                                                                        else { 0 })),
                            _ => return Err("non-numbers on stack with >".to_string())
                        }
                    } else { return Err("stack is empty".to_string()) },
                    '<' => if stack.len() > 1 {
                        match (stack.pop().unwrap(), stack.pop().unwrap()) {
                            (Number(y), Number(x)) => stack.push(Number(if x < y { 1 }
                                                                        else { 0 })),
                            _ => return Err("non-numbers on stack with <".to_string())
                        }
                    } else { return Err("stack is empty".to_string()) },
                    'A' => if stack.len() > 1 {
                        match (stack.pop().unwrap(), stack.pop().unwrap()) {
                            (Number(0), Number(_)) => stack.push(Number(0)),
                            (Number(_), Number(0)) => stack.push(Number(0)),
                            (Number(_), Number(_)) => stack.push(Number(1)),
                            _ => return Err("non-numbers on stack with logical and".to_string())
                        }
                    } else { return Err("stack is empty".to_string()) },
                    'O' => if stack.len() > 1 {
                        match (stack.pop().unwrap(), stack.pop().unwrap()) {
                            (Number(0), Number(0)) => stack.push(Number(0)),
                            (Number(_), Number(_)) => stack.push(Number(1)),
                            _ => return Err("non-numbers on stack with logical or".to_string())
                        }
                    } else { return Err("stack is empty".to_string()) },
                    '!' => if stack.len() > 0 {
                        match stack.pop().unwrap() {
                            Number(0) => stack.push(Number(1)),
                            Number(_) => stack.push(Number(0)),
                            _ => return Err("non-number on stack with logical not".to_string())
                        }
                    } else { return Err("stack is empty".to_string()) },
                    '~' => if stack.len() > 0 {
                        match stack.pop().unwrap() {
                            Number(x) => stack.push(Number(!x)),
                            _ => return Err("non-number on stack with %~".to_string())
                        }
                    } else { return Err("stack is empty".to_string()) },
                    // %i: increment the first two parameters (for 1-based
                    // cursor addressing).
                    'i' => match (mparams[0].clone(), mparams[1].clone()) {
                        (Number(x), Number(y)) => {
                            mparams[0] = Number(x+1);
                            mparams[1] = Number(y+1);
                        },
                        (_, _) => return Err("first two params not numbers with %i".to_string())
                    },
                    // printf-style support for %doxXs
                    'd'|'o'|'x'|'X'|'s' => if stack.len() > 0 {
                        let flags = Flags::new();
                        let res = format(stack.pop().unwrap(), FormatOp::from_char(cur), flags);
                        if res.is_err() { return res }
                        output.push_all(&res.unwrap())
                    } else { return Err("stack is empty".to_string()) },
                    ':'|'#'|' '|'.'|'0'...'9' => {
                        let mut flags = Flags::new();
                        let mut fstate = FormatStateFlags;
                        match cur {
                            ':' => (),
                            '#' => flags.alternate = true,
                            ' ' => flags.space = true,
                            '.' => fstate = FormatStatePrecision,
                            '0'...'9' => {
                                flags.width = cur as uint - '0' as uint;
                                fstate = FormatStateWidth;
                            }
                            _ => unreachable!()
                        }
                        state = FormatPattern(flags, fstate);
                    }
                    // conditionals
                    '?' => (),
                    't' => if stack.len() > 0 {
                        match stack.pop().unwrap() {
                            Number(0) => state = SeekIfElse(0),
                            Number(_) => (),
                            _ => return Err("non-number on stack \
                                with conditional".to_string())
                        }
                    } else { return Err("stack is empty".to_string()) },
                    'e' => state = SeekIfEnd(0),
                    ';' => (),
                    _ => {
                        return Err(format!("unrecognized format option {:?}", cur))
                    }
                }
            },
            PushParam => {
                // params are 1-indexed
                stack.push(mparams[match cur.to_digit(10) {
                    Some(d) => d as usize - 1,
                    None => return Err("bad param number".to_string())
                }].clone());
            },
            SetVar => {
                if cur >= 'A' && cur <= 'Z' {
                    if stack.len() > 0 {
                        let idx = (cur as u8) - b'A';
                        vars.sta[idx as uint] = stack.pop().unwrap();
                    } else { return Err("stack is empty".to_string()) }
                } else if cur >= 'a' && cur <= 'z' {
                    if stack.len() > 0 {
                        let idx = (cur as u8) - b'a';
                        vars.dyn[idx as uint] = stack.pop().unwrap();
                    } else { return Err("stack is empty".to_string()) }
                } else {
                    return Err("bad variable name in %P".to_string());
                }
            },
            GetVar => {
                if cur >= 'A' && cur <= 'Z' {
                    let idx = (cur as u8) - b'A';
                    stack.push(vars.sta[idx as uint].clone());
                } else if cur >= 'a' && cur <= 'z' {
                    let idx = (cur as u8) - b'a';
                    stack.push(vars.dyn[idx as uint].clone());
                } else {
                    return Err("bad variable name in %g".to_string());
                }
            },
            CharConstant => {
                stack.push(Number(c as int));
                state = CharClose;
            },
            CharClose => {
                if cur != '\'' {
                    return Err("malformed character constant".to_string());
                }
            },
            IntConstant(i) => {
                match cur {
                    '}' => {
                        stack.push(Number(i));
                        state = Nothing;
                    }
                    '0'...'9' => {
                        state = IntConstant(i*10 + (cur as int - '0' as int));
                        old_state = Nothing;
                    }
                    _ => return Err("bad int constant".to_string())
                }
            }
            FormatPattern(ref mut flags, ref mut fstate) => {
                old_state = Nothing;
                match (*fstate, cur) {
                    (_,'d')|(_,'o')|(_,'x')|(_,'X')|(_,'s') => if stack.len() > 0 {
                        let res = format(stack.pop().unwrap(), FormatOp::from_char(cur), *flags);
                        if res.is_err() { return res }
                        output.push_all(&res.unwrap());
                        // will cause state to go to Nothing
                        old_state = FormatPattern(*flags, *fstate);
                    } else { return Err("stack is empty".to_string()) },
                    (FormatStateFlags,'#') => {
                        flags.alternate = true;
                    }
                    (FormatStateFlags,'-') => {
                        flags.left = true;
                    }
                    (FormatStateFlags,'+') => {
                        flags.sign = true;
                    }
                    (FormatStateFlags,' ') => {
                        flags.space = true;
                    }
                    (FormatStateFlags,'0'...'9') => {
                        flags.width = cur as uint - '0' as uint;
                        *fstate = FormatStateWidth;
                    }
                    (FormatStateFlags,'.') => {
                        *fstate = FormatStatePrecision;
                    }
                    (FormatStateWidth,'0'...'9') => {
                        let old = flags.width;
                        flags.width = flags.width * 10 + (cur as uint - '0' as uint);
                        if flags.width < old { return Err("format width overflow".to_string()) }
                    }
                    (FormatStateWidth,'.') => {
                        *fstate = FormatStatePrecision;
                    }
                    (FormatStatePrecision,'0'...'9') => {
                        let old = flags.precision;
                        flags.precision = flags.precision * 10 + (cur as uint - '0' as uint);
                        if flags.precision < old {
                            return Err("format precision overflow".to_string())
                        }
                    }
                    _ => return Err("invalid format specifier".to_string())
                }
            }
            SeekIfElse(level) => {
                if cur == '%' {
                    state = SeekIfElsePercent(level);
                }
                old_state = Nothing;
            }
            SeekIfElsePercent(level) => {
                if cur == ';' {
                    if level == 0 {
                        state = Nothing;
                    } else {
                        state = SeekIfElse(level-1);
                    }
                } else if cur == 'e' && level == 0 {
                    state = Nothing;
                } else if cur == '?' {
                    state = SeekIfElse(level+1);
                } else {
                    state = SeekIfElse(level);
                }
            }
            SeekIfEnd(level) => {
                if cur == '%' {
                    state = SeekIfEndPercent(level);
                }
                old_state = Nothing;
            }
            SeekIfEndPercent(level) => {
                if cur == ';' {
                    if level == 0 {
                        state = Nothing;
                    } else {
                        state = SeekIfEnd(level-1);
                    }
                } else if cur == '?' {
                    state = SeekIfEnd(level+1);
                } else {
                    state = SeekIfEnd(level);
                }
            }
        }
        // A handler that left `state` equal to the pre-dispatch `old_state`
        // is a one-shot state: fall back to Nothing for the next byte.
        // Handlers that want to stay in a multi-byte state overwrite
        // `old_state` (or `state`) above to defeat this reset.
        if state == old_state {
            state = Nothing;
        }
    }
    Ok(output)
}
// printf-style modifiers accumulated while parsing a %:… format pattern.
#[derive(Copy, PartialEq)]
struct Flags {
    // Minimum field width; 0 means "no padding requested".
    width: uint,
    // Minimum digit count (numbers) or maximum length (strings).
    precision: uint,
    // '#': alternate form (leading 0 for octal, 0x/0X for hex).
    alternate: bool,
    // '-': left-adjust within the field width.
    left: bool,
    // '+': always print a sign for %d.
    sign: bool,
    // ' ': prefix non-negative %d output with a space.
    space: bool
}
impl Flags {
    // All-default flags: no width, no precision, no modifiers.
    fn new() -> Flags {
        Flags{ width: 0, precision: 0, alternate: false,
               left: false, sign: false, space: false }
    }
}
// The conversion selected by the final character of a format pattern.
#[derive(Copy)]
enum FormatOp {
    FormatDigit,
    FormatOctal,
    FormatHex,
    FormatHEX,
    FormatString
}
impl FormatOp {
    // Maps a conversion character (d/o/x/X/s) to its op; panics on
    // anything else — callers only pass characters already matched.
    fn from_char(c: char) -> FormatOp {
        match c {
            'd' => FormatDigit,
            'o' => FormatOctal,
            'x' => FormatHex,
            'X' => FormatHEX,
            's' => FormatString,
            _ => panic!("bad FormatOp char")
        }
    }
    // Inverse of `from_char`, used for error messages.
    fn to_char(self) -> char {
        match self {
            FormatDigit => 'd',
            FormatOctal => 'o',
            FormatHex => 'x',
            FormatHEX => 'X',
            FormatString => 's'
        }
    }
}
// Renders one printf-style conversion of `val` (%d, %o, %x, %X or %s) as
// bytes, honoring width, precision and the #/-/+/space modifiers.
// Errors when the operand type does not match the conversion.
fn format(val: Param, op: FormatOp, flags: Flags) -> Result<Vec<u8> ,String> {
    let mut s = match val {
        Number(d) => {
            let s = match (op, flags.sign) {
                (FormatDigit, true) => format!("{:+}", d).into_bytes(),
                (FormatDigit, false) => format!("{}", d).into_bytes(),
                (FormatOctal, _) => format!("{:o}", d).into_bytes(),
                (FormatHex, _) => format!("{:x}", d).into_bytes(),
                (FormatHEX, _) => format!("{:X}", d).into_bytes(),
                (FormatString, _) => {
                    return Err("non-number on stack with %s".to_string())
                }
            };
            let mut s: Vec<u8> = s.into_iter().collect();
            // For numbers, precision means a minimum digit count: zero-pad.
            if flags.precision > s.len() {
                let mut s_ = Vec::with_capacity(flags.precision);
                let n = flags.precision - s.len();
                s_.extend(repeat(b'0').take(n));
                s_.extend(s.into_iter());
                s = s_;
            }
            assert!(!s.is_empty(), "string conversion produced empty result");
            // Apply the space / alternate-form modifiers per conversion.
            match op {
                FormatDigit => {
                    if flags.space && !(s[0] == b'-' || s[0] == b'+' ) {
                        s.insert(0, b' ');
                    }
                }
                FormatOctal => {
                    if flags.alternate && s[0] != b'0' {
                        s.insert(0, b'0');
                    }
                }
                FormatHex => {
                    if flags.alternate {
                        let s_ = replace(&mut s, vec!(b'0', b'x'));
                        s.extend(s_.into_iter());
                    }
                }
                FormatHEX => {
                    s = s.into_ascii_uppercase();
                    if flags.alternate {
                        let s_ = replace(&mut s, vec!(b'0', b'X'));
                        s.extend(s_.into_iter());
                    }
                }
                FormatString => unreachable!()
            }
            s
        }
        Words(s) => {
            match op {
                FormatString => {
                    let mut s = s.as_bytes().to_vec();
                    // For strings, precision is a maximum length: truncate.
                    if flags.precision > 0 && flags.precision < s.len() {
                        s.truncate(flags.precision);
                    }
                    s
                }
                _ => {
                    return Err(format!("non-string on stack with %{:?}",
                                       op.to_char()))
                }
            }
        }
    };
    // Pad with spaces up to the requested width, on the side selected by '-'.
    if flags.width > s.len() {
        let n = flags.width - s.len();
        if flags.left {
            s.extend(repeat(b' ').take(n));
        } else {
            let mut s_ = Vec::with_capacity(flags.width);
            s_.extend(repeat(b' ').take(n));
            s_.extend(s.into_iter());
            s = s_;
        }
    }
    Ok(s)
}
#[cfg(test)]
mod test {
    // Regression tests for `expand`: literal %-escapes, %i, stack
    // underflow errors, comparisons, %?…%t…%e…%; conditionals and the
    // printf-style width/precision formatting.
    use super::{expand,Param,Words,Variables,Number};
    use std::result::Result::Ok;
    #[test]
    fn test_basic_setabf() {
        let s = b"\\E[48;5;%p1%dm";
        assert_eq!(expand(s, &[Number(1)], &mut Variables::new()).unwrap(),
                   "\\E[48;5;1m".bytes().collect::<Vec<_>>());
    }
    #[test]
    fn test_multiple_int_constants() {
        assert_eq!(expand(b"%{1}%{2}%d%d", &[], &mut Variables::new()).unwrap(),
                   "21".bytes().collect::<Vec<_>>());
    }
    #[test]
    fn test_op_i() {
        let mut vars = Variables::new();
        assert_eq!(expand(b"%p1%d%p2%d%p3%d%i%p1%d%p2%d%p3%d",
                          &[Number(1),Number(2),Number(3)], &mut vars),
                   Ok("123233".bytes().collect::<Vec<_>>()));
        // Missing params default to Number(0); %i still bumps the first two.
        assert_eq!(expand(b"%p1%d%p2%d%i%p1%d%p2%d", &[], &mut vars),
                   Ok("0011".bytes().collect::<Vec<_>>()));
    }
    #[test]
    fn test_param_stack_failure_conditions() {
        let mut varstruct = Variables::new();
        let vars = &mut varstruct;
        // Expands `fmt` followed by `cap`, e.g. a push then an operator.
        fn get_res(fmt: &str, cap: &str, params: &[Param], vars: &mut Variables) ->
            Result<Vec<u8>, String>
        {
            let mut u8v: Vec<_> = fmt.bytes().collect();
            u8v.extend(cap.bytes());
            expand(&u8v, params, vars)
        }
        // Unary ops must fail on an empty stack and succeed with one entry.
        let caps = ["%d", "%c", "%s", "%Pa", "%l", "%!", "%~"];
        for &cap in &caps {
            let res = get_res("", cap, &[], vars);
            assert!(res.is_err(),
                    "Op {} succeeded incorrectly with 0 stack entries", cap);
            let p = if cap == "%s" || cap == "%l" {
                Words("foo".to_string())
            } else {
                Number(97)
            };
            let res = get_res("%p1", cap, &[p], vars);
            assert!(res.is_ok(),
                    "Op {} failed with 1 stack entry: {}", cap, res.err().unwrap());
        }
        // Binary ops need two stack entries.
        let caps = ["%+", "%-", "%*", "%/", "%m", "%&", "%|", "%A", "%O"];
        for &cap in &caps {
            let res = expand(cap.as_bytes(), &[], vars);
            assert!(res.is_err(),
                    "Binop {} succeeded incorrectly with 0 stack entries", cap);
            let res = get_res("%{1}", cap, &[], vars);
            assert!(res.is_err(),
                    "Binop {} succeeded incorrectly with 1 stack entry", cap);
            let res = get_res("%{1}%{2}", cap, &[], vars);
            assert!(res.is_ok(),
                    "Binop {} failed with 2 stack entries: {:?}", cap, res.err().unwrap());
        }
    }
    #[test]
    fn test_push_bad_param() {
        assert!(expand(b"%pa", &[], &mut Variables::new()).is_err());
    }
    #[test]
    fn test_comparison_ops() {
        // For each operator, the expected 0/1 results of (1 op 2),
        // (1 op 1) and (2 op 1).
        let v = [('<', [1u8, 0u8, 0u8]), ('=', [0u8, 1u8, 0u8]), ('>', [0u8, 0u8, 1u8])];
        for &(op, bs) in &v {
            let s = format!("%{{1}}%{{2}}%{}%d", op);
            let res = expand(s.as_bytes(), &[], &mut Variables::new());
            assert!(res.is_ok(), res.err().unwrap());
            assert_eq!(res.unwrap(), [b'0' + bs[0]]);
            let s = format!("%{{1}}%{{1}}%{}%d", op);
            let res = expand(s.as_bytes(), &[], &mut Variables::new());
            assert!(res.is_ok(), res.err().unwrap());
            assert_eq!(res.unwrap(), [b'0' + bs[1]]);
            let s = format!("%{{2}}%{{1}}%{}%d", op);
            let res = expand(s.as_bytes(), &[], &mut Variables::new());
            assert!(res.is_ok(), res.err().unwrap());
            assert_eq!(res.unwrap(), [b'0' + bs[2]]);
        }
    }
    #[test]
    fn test_conditionals() {
        let mut vars = Variables::new();
        // setaf-style capability with nested %? conditionals.
        let s = b"\\E[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m";
        let res = expand(s, &[Number(1)], &mut vars);
        assert!(res.is_ok(), res.err().unwrap());
        assert_eq!(res.unwrap(),
                   "\\E[31m".bytes().collect::<Vec<_>>());
        let res = expand(s, &[Number(8)], &mut vars);
        assert!(res.is_ok(), res.err().unwrap());
        assert_eq!(res.unwrap(),
                   "\\E[90m".bytes().collect::<Vec<_>>());
        let res = expand(s, &[Number(42)], &mut vars);
        assert!(res.is_ok(), res.err().unwrap());
        assert_eq!(res.unwrap(),
                   "\\E[38;5;42m".bytes().collect::<Vec<_>>());
    }
    #[test]
    fn test_format() {
        let mut varstruct = Variables::new();
        let vars = &mut varstruct;
        assert_eq!(expand(b"%p1%s%p2%2s%p3%2s%p4%.2s",
                          &[Words("foo".to_string()),
                            Words("foo".to_string()),
                            Words("f".to_string()),
                            Words("foo".to_string())], vars),
                   Ok("foofoo ffo".bytes().collect::<Vec<_>>()));
        assert_eq!(expand(b"%p1%:-4.2s", &[Words("foo".to_string())], vars),
                   Ok("fo  ".bytes().collect::<Vec<_>>()));
        assert_eq!(expand(b"%p1%d%p1%.3d%p1%5d%p1%:+d", &[Number(1)], vars),
                   Ok("1001    1+1".bytes().collect::<Vec<_>>()));
        assert_eq!(expand(b"%p1%o%p1%#o%p2%6.4x%p2%#6.4X", &[Number(15), Number(27)], vars),
                   Ok("17017  001b0X001B".bytes().collect::<Vec<_>>()));
    }
}
| 41.228693 | 97 | 0.396279 |
69c4414a0b01b21f6d21c884095001435ab4fb89 | 18,795 | ///////////////////////////////////////////////////////////////////////////////
//
// Copyright 2018-2021 Robonomics Network <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////////////
//! Polkadot collator service implementation.
use codec::Encode;
use cumulus_client_cli::CollatorOptions;
use cumulus_client_consensus_common::ParachainConsensus;
use cumulus_client_consensus_relay_chain::{
build_relay_chain_consensus, BuildRelayChainConsensusParams,
};
use cumulus_client_network::BlockAnnounceValidator;
use cumulus_client_service::{
prepare_node_config, start_collator, start_full_node, StartCollatorParams, StartFullNodeParams,
};
use cumulus_primitives_parachain_inherent::ParachainInherentData;
use cumulus_relay_chain_inprocess_interface::build_inprocess_relay_chain;
use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface, RelayChainResult};
use cumulus_relay_chain_rpc_interface::RelayChainRPCInterface;
use hex_literal::hex;
use polkadot_service::CollatorPair;
use robonomics_primitives::{AccountId, Balance, Block, Hash, Index};
use robonomics_protocol::pubsub::gossipsub::PubSub;
pub use sc_executor::NativeElseWasmExecutor;
use sc_network::NetworkService;
use sc_service::{Configuration, Role, TFullBackend, TFullClient, TaskManager};
use sc_telemetry::{TelemetryHandle, TelemetryWorkerHandle};
use sp_keystore::SyncCryptoStorePtr;
use sp_runtime::traits::BlakeTwo256;
use sp_trie::PrefixedMemoryDB;
use std::sync::Arc;
use std::time::Duration;
use substrate_prometheus_endpoint::Registry;
/// Builds the handle this collator uses to talk to the relay chain.
///
/// When `collator_options` carries a relay-chain RPC URL, connects to that
/// external relay-chain node over RPC (no local collator key is produced).
/// Otherwise a full relay-chain node is spawned inside this process.
async fn build_relay_chain_interface(
    polkadot_config: Configuration,
    parachain_config: &Configuration,
    telemetry_worker_handle: Option<TelemetryWorkerHandle>,
    task_manager: &mut TaskManager,
    collator_options: CollatorOptions,
) -> RelayChainResult<(
    Arc<(dyn RelayChainInterface + 'static)>,
    Option<CollatorPair>,
)> {
    if let Some(relay_chain_url) = collator_options.relay_chain_rpc_url {
        // External relay chain node reachable over RPC.
        let rpc_interface = RelayChainRPCInterface::new(relay_chain_url).await?;
        Ok((Arc::new(rpc_interface) as Arc<_>, None))
    } else {
        // In-process relay chain node; also yields the collator key pair.
        build_inprocess_relay_chain(
            polkadot_config,
            parachain_config,
            telemetry_worker_handle,
            task_manager,
            None,
        )
    }
}
/// Assembles the "partial" Substrate service components shared by both the
/// collator and full-node start-up paths: client, backend, keystore, task
/// manager, import queue, transaction pool and (optional) telemetry.
///
/// `BIQ` is a callback that builds the block import queue for the concrete
/// runtime; the telemetry worker handle is passed back to the caller via the
/// `other` field so it can be forwarded to the relay chain builder.
fn new_partial<RuntimeApi, Executor, BIQ>(
    config: &Configuration,
    build_import_queue: BIQ,
) -> Result<
    sc_service::PartialComponents<
        TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
        TFullBackend<Block>,
        (),
        sc_consensus::import_queue::BasicQueue<Block, PrefixedMemoryDB<BlakeTwo256>>,
        sc_transaction_pool::FullPool<
            Block,
            TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
        >,
        (
            Option<sc_telemetry::Telemetry>,
            Option<sc_telemetry::TelemetryWorkerHandle>,
        ),
    >,
    sc_service::Error,
>
where
    Executor: sc_executor::NativeExecutionDispatch + 'static,
    RuntimeApi: sp_api::ConstructRuntimeApi<
        Block,
        TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
    > + Send
        + Sync
        + 'static,
    RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
        + sp_api::Metadata<Block>
        + sp_session::SessionKeys<Block>
        + sp_api::ApiExt<
            Block,
            StateBackend = sc_client_api::StateBackendFor<TFullBackend<Block>, Block>,
        > + sp_offchain::OffchainWorkerApi<Block>
        + sp_block_builder::BlockBuilder<Block>,
    sc_client_api::StateBackendFor<TFullBackend<Block>, Block>: sp_api::StateBackend<BlakeTwo256>,
    BIQ: FnOnce(
        Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
        &Configuration,
        Option<TelemetryHandle>,
        &TaskManager,
    ) -> Result<
        sc_consensus::DefaultImportQueue<
            Block,
            TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
        >,
        sc_service::Error,
    >,
{
    // Telemetry is optional: an absent or empty endpoint list disables it.
    let telemetry = config
        .telemetry_endpoints
        .clone()
        .filter(|x| !x.is_empty())
        .map(|endpoints| -> Result<_, sc_telemetry::Error> {
            let worker = sc_telemetry::TelemetryWorker::new(16)?;
            let telemetry = worker.handle().new_telemetry(endpoints);
            Ok((worker, telemetry))
        })
        .transpose()?;
    let executor = NativeElseWasmExecutor::<Executor>::new(
        config.wasm_method,
        config.default_heap_pages,
        config.max_runtime_instances,
        config.runtime_cache_size,
    );
    let (client, backend, keystore_container, task_manager) =
        sc_service::new_full_parts::<Block, RuntimeApi, _>(
            &config,
            telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
            executor,
        )?;
    let client = Arc::new(client);
    // Keep a worker handle before the worker itself is moved into its task below.
    let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
    let telemetry = telemetry.map(|(worker, telemetry)| {
        task_manager
            .spawn_handle()
            .spawn("telemetry", None, worker.run());
        telemetry
    });
    let transaction_pool = sc_transaction_pool::BasicPool::new_full(
        config.transaction_pool.clone(),
        config.role.is_authority().into(),
        config.prometheus_registry(),
        task_manager.spawn_essential_handle(),
        client.clone(),
    );
    // Delegate import-queue construction to the runtime-specific callback.
    let import_queue = build_import_queue(
        client.clone(),
        config,
        telemetry.as_ref().map(|telemetry| telemetry.handle()),
        &task_manager,
    )?;
    let params = sc_service::PartialComponents {
        backend,
        client,
        import_queue,
        keystore_container,
        task_manager,
        transaction_pool,
        select_chain: (),
        other: (telemetry, telemetry_worker_handle),
    };
    Ok(params)
}
/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
///
/// This is the actual implementation that is abstract over the executor and the runtime api.
///
/// Builds the partial components, connects to (or spawns) the relay chain,
/// wires up networking and RPC (including the local gossipsub PubSub used by
/// the Robonomics RPC), and then starts either a collator (when running as an
/// authority) or a plain parachain full node. `BIQ` builds the import queue,
/// `BIC` builds the parachain consensus.
#[sc_tracing::logging::prefix_logs_with("Parachain")]
pub async fn start_node_impl<RuntimeApi, Executor, BIQ, BIC>(
    parachain_config: Configuration,
    polkadot_config: Configuration,
    collator_options: CollatorOptions,
    id: polkadot_primitives::v2::Id,
    lighthouse_account: Option<AccountId>,
    build_import_queue: BIQ,
    build_consensus: BIC,
    heartbeat_interval: u64,
) -> sc_service::error::Result<TaskManager>
where
    Executor: sc_executor::NativeExecutionDispatch + 'static,
    RuntimeApi: sp_api::ConstructRuntimeApi<
        Block,
        TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
    > + Send
        + Sync
        + 'static,
    RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
        + sp_api::Metadata<Block>
        + sp_session::SessionKeys<Block>
        + sp_api::ApiExt<
            Block,
            StateBackend = sc_client_api::StateBackendFor<TFullBackend<Block>, Block>,
        > + sp_offchain::OffchainWorkerApi<Block>
        + sp_block_builder::BlockBuilder<Block>
        + cumulus_primitives_core::CollectCollationInfo<Block>
        + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, Balance>
        + substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Index>,
    sc_client_api::StateBackendFor<TFullBackend<Block>, Block>: sp_api::StateBackend<BlakeTwo256>,
    BIQ: FnOnce(
        Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
        &Configuration,
        Option<TelemetryHandle>,
        &TaskManager,
    ) -> Result<
        sc_consensus::DefaultImportQueue<
            Block,
            TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
        >,
        sc_service::Error,
    >,
    BIC: FnOnce(
        polkadot_primitives::v2::Id,
        Option<AccountId>,
        Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
        Option<&Registry>,
        Option<TelemetryHandle>,
        &TaskManager,
        Arc<dyn RelayChainInterface>,
        Arc<
            sc_transaction_pool::FullPool<
                Block,
                TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
            >,
        >,
        Arc<NetworkService<Block, Hash>>,
        SyncCryptoStorePtr,
        bool,
    ) -> Result<Box<dyn ParachainConsensus<Block>>, sc_service::Error>,
{
    // Light-client mode is not supported for parachain nodes.
    if matches!(parachain_config.role, Role::Light) {
        return Err("Light client not supported!".into());
    }
    let parachain_config = prepare_node_config(parachain_config);
    let params = new_partial::<RuntimeApi, Executor, BIQ>(&parachain_config, build_import_queue)?;
    let (mut telemetry, telemetry_worker_handle) = params.other;
    let client = params.client.clone();
    let backend = params.backend.clone();
    let mut task_manager = params.task_manager;
    let (relay_chain_interface, collator_key) = build_relay_chain_interface(
        polkadot_config,
        &parachain_config,
        telemetry_worker_handle,
        &mut task_manager,
        collator_options.clone(),
    )
    .await
    // Unwrap nested substrate service errors; stringify anything else.
    .map_err(|e| match e {
        RelayChainError::ServiceError(polkadot_service::Error::Sub(x)) => x,
        s => s.to_string().into(),
    })?;
    // Block announcements are validated against the relay chain for this para id.
    let block_announce_validator = BlockAnnounceValidator::new(relay_chain_interface.clone(), id);
    let prometheus_registry = parachain_config.prometheus_registry().cloned();
    let force_authoring = parachain_config.force_authoring;
    let is_authority = parachain_config.role.is_authority();
    let transaction_pool = params.transaction_pool.clone();
    let import_queue = cumulus_client_service::SharedImportQueue::new(params.import_queue);
    let (network, system_rpc_tx, start_network) =
        sc_service::build_network(sc_service::BuildNetworkParams {
            config: &parachain_config,
            client: client.clone(),
            transaction_pool: transaction_pool.clone(),
            spawn_handle: task_manager.spawn_handle(),
            import_queue: import_queue.clone(),
            block_announce_validator_builder: Some(Box::new(|_| {
                Box::new(block_announce_validator)
            })),
            warp_sync: None,
        })?;
    let rpc_client = client.clone();
    let rpc_pool = transaction_pool.clone();
    // Local gossipsub pubsub node, exposed through the Robonomics RPC below;
    // its background worker runs for the lifetime of the node.
    let (pubsub, pubsub_worker) =
        PubSub::new(Duration::from_millis(heartbeat_interval)).expect("New PubSub");
    task_manager
        .spawn_handle()
        .spawn("pubsub_parachain", None, pubsub_worker);
    sc_service::spawn_tasks(sc_service::SpawnTasksParams {
        rpc_builder: Box::new(move |deny_unsafe, _| {
            let deps = robonomics_rpc::FullDeps {
                client: rpc_client.clone(),
                pool: rpc_pool.clone(),
                deny_unsafe,
                pubsub: pubsub.clone(),
            };
            robonomics_rpc::create_full(deps).map_err(Into::into)
        }),
        client: client.clone(),
        transaction_pool: transaction_pool.clone(),
        task_manager: &mut task_manager,
        config: parachain_config,
        keystore: params.keystore_container.sync_keystore(),
        backend: backend.clone(),
        network: network.clone(),
        system_rpc_tx,
        telemetry: telemetry.as_mut(),
    })?;
    let announce_block = {
        let network = network.clone();
        Arc::new(move |hash, data| network.announce_block(hash, data))
    };
    let relay_chain_slot_duration = Duration::from_secs(6);
    if is_authority {
        // Authority: build consensus and run as a collator producing blocks.
        let parachain_consensus = build_consensus(
            id,
            lighthouse_account,
            client.clone(),
            prometheus_registry.as_ref(),
            telemetry.as_ref().map(|t| t.handle()),
            &task_manager,
            relay_chain_interface.clone(),
            transaction_pool,
            network,
            params.keystore_container.sync_keystore(),
            force_authoring,
        )?;
        let spawner = task_manager.spawn_handle();
        let params = StartCollatorParams {
            para_id: id,
            block_status: client.clone(),
            import_queue,
            announce_block,
            client: client.clone(),
            task_manager: &mut task_manager,
            relay_chain_interface,
            spawner,
            parachain_consensus,
            collator_key: collator_key.expect("Command line arguments do not allow this. qed"),
            relay_chain_slot_duration,
        };
        start_collator(params).await?;
    } else {
        // Non-authority: follow the chain as a parachain full node.
        let params = StartFullNodeParams {
            client: client.clone(),
            collator_options,
            announce_block,
            task_manager: &mut task_manager,
            para_id: id,
            relay_chain_interface,
            relay_chain_slot_duration,
            import_queue,
        };
        start_full_node(params)?;
    }
    start_network.start_network();
    Ok(task_manager)
}
/// Build the import queue for the open consensus parachain runtime.
///
/// Uses the relay-chain-driven import queue; the only inherent data provider
/// wired in here is the system-time timestamp.
pub fn build_open_import_queue<RuntimeApi, Executor>(
    client: Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
    config: &Configuration,
    _telemetry: Option<TelemetryHandle>,
    task_manager: &TaskManager,
) -> Result<
    sc_consensus::DefaultImportQueue<
        Block,
        TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
    >,
    sc_service::Error,
>
where
    Executor: sc_executor::NativeExecutionDispatch + 'static,
    RuntimeApi: sp_api::ConstructRuntimeApi<
        Block,
        TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
    > + Send
        + Sync
        + 'static,
    RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
        + sp_api::Metadata<Block>
        + sp_session::SessionKeys<Block>
        + sp_api::ApiExt<
            Block,
            StateBackend = sc_client_api::StateBackendFor<TFullBackend<Block>, Block>,
        > + sp_offchain::OffchainWorkerApi<Block>
        + sp_block_builder::BlockBuilder<Block>
        + cumulus_primitives_core::CollectCollationInfo<Block>,
    sc_client_api::StateBackendFor<TFullBackend<Block>, Block>: sp_api::StateBackend<BlakeTwo256>,
{
    let registry = config.prometheus_registry();
    // The client serves as both block import and runtime-API provider here.
    cumulus_client_consensus_relay_chain::import_queue(
        client.clone(),
        client.clone(),
        |_, _| async {
            let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
            Ok(timestamp)
        },
        &task_manager.spawn_essential_handle(),
        registry.clone(),
    )
    .map_err(Into::into)
}
/// Build the open set consensus.
///
/// Produces a relay-chain-driven parachain consensus whose inherents are the
/// system timestamp, the lighthouse account (the block author identity; the
/// treasury account is used when none is supplied), and the standard parachain
/// inherent assembled from relay-chain state.
pub fn build_open_consensus<RuntimeApi, Executor>(
    para_id: polkadot_primitives::v2::Id,
    lighthouse_account: Option<AccountId>,
    client: Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
    prometheus_registry: Option<&Registry>,
    telemetry: Option<TelemetryHandle>,
    task_manager: &TaskManager,
    relay_chain_interface: Arc<dyn RelayChainInterface>,
    transaction_pool: Arc<
        sc_transaction_pool::FullPool<
            Block,
            TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
        >,
    >,
    _sync_oracle: Arc<NetworkService<Block, Hash>>,
    _keystore: SyncCryptoStorePtr,
    _force_authoring: bool,
) -> Result<Box<dyn ParachainConsensus<Block>>, sc_service::Error>
where
    Executor: sc_executor::NativeExecutionDispatch + 'static,
    RuntimeApi: sp_api::ConstructRuntimeApi<
        Block,
        TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
    > + Send
        + Sync
        + 'static,
    RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
        + sp_api::Metadata<Block>
        + sp_session::SessionKeys<Block>
        + sp_api::ApiExt<
            Block,
            StateBackend = sc_client_api::StateBackendFor<TFullBackend<Block>, Block>,
        > + sp_offchain::OffchainWorkerApi<Block>
        + sp_block_builder::BlockBuilder<Block>
        + cumulus_primitives_core::CollectCollationInfo<Block>,
    sc_client_api::StateBackendFor<TFullBackend<Block>, Block>: sp_api::StateBackend<BlakeTwo256>,
{
    let account = lighthouse_account.unwrap_or(
        // Treasury by default
        hex!["6d6f646c70792f74727372790000000000000000000000000000000000000000"].into(),
    );
    // Proof recording is required so collations carry a PoV proof.
    let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
        task_manager.spawn_handle(),
        client.clone(),
        transaction_pool,
        prometheus_registry,
        telemetry.clone(),
    );
    let consensus = build_relay_chain_consensus(BuildRelayChainConsensusParams {
        para_id,
        proposer_factory,
        block_import: client.clone(),
        relay_chain_interface: relay_chain_interface.clone(),
        // Called once per block authored; captures the SCALE-encoded lighthouse
        // account and a relay chain handle for assembling the inherents.
        create_inherent_data_providers: move |_, (relay_parent, validation_data)| {
            let encoded_account = account.encode();
            let relay_chain_interface = relay_chain_interface.clone();
            async move {
                let parachain_inherent = ParachainInherentData::create_at(
                    relay_parent,
                    &relay_chain_interface,
                    &validation_data,
                    para_id,
                )
                .await;
                let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
                let lighthouse =
                    pallet_robonomics_lighthouse::InherentDataProvider(encoded_account);
                let parachain = parachain_inherent.ok_or_else(|| {
                    Box::<dyn std::error::Error + Send + Sync>::from(
                        "Failed to create parachain inherent",
                    )
                })?;
                Ok((timestamp, lighthouse, parachain))
            }
        },
    });
    Ok(consensus)
}
| 36.566148 | 99 | 0.650865 |
b9d6a2bc90dca4e69dcc9754f121a3260feea922 | 2,829 | use super::jpm::Jpm;
use super::JwmHeader;
use idp2p_common::anyhow::Result;
use idp2p_common::base64url;
use idp2p_common::ed25519_dalek::{PublicKey, Signature, Verifier};
use idp2p_common::secret::EdSecret;
use idp2p_common::encode;
use idp2p_common::serde_json::json;
use serde::{Deserialize, Serialize};
use std::convert::TryInto;
// JWS protected-header values pinned by this implementation: the DIDComm
// signed-message media type ("typ") and the Ed25519 signature algorithm ("alg").
const TYP: &str = "application/didcomm-signed+json";
const ALG: &str = "EdDSA";
/// A JSON-serialized JWS (JSON Web Signature) envelope as produced by
/// [`Jws::new`]: a base64url payload plus one or more signature entries.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct Jws {
    // base64url-encoded message payload (a serialized `Jpm`).
    pub payload: String,
    // Signature entries; `Jws::new` always produces exactly one.
    pub signatures: Vec<JwsSignature>,
}
/// One signature entry of a [`Jws`].
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct JwsSignature {
    // base64url-encoded protected header (the `{"typ", "alg"}` JSON object).
    pub protected: String,
    // base64url-encoded 64-byte Ed25519 signature over `protected + "." + payload`.
    pub signature: String,
    // Unprotected header carrying the signing key id (`kid`).
    pub header: JwmHeader,
}
impl JwsSignature {
fn get_signature_bytes(&self) -> Result<[u8; 64]> {
let sig_vec = base64url::decode_str(&self.signature)?;
Ok(sig_vec.try_into().unwrap())
}
}
impl Jws {
pub fn new(jpm: Jpm, secret: EdSecret) -> Result<Jws> {
let kid = format!("{}#{}", jpm.from, encode(&secret.to_publickey()));
let header = JwmHeader { kid: kid };
let payload_b64 = base64url::encode(jpm)?;
let protected_json = json!({"typ": TYP.to_owned(), "alg": ALG.to_owned()});
let protected_b64 = base64url::encode_str(&protected_json.to_string())?;
let compact = format!("{protected_b64}.{payload_b64}");
let sig_data = secret.sign_str(&compact);
let sig_b64 = base64url::encode_bytes(&sig_data)?;
let jws_signature = JwsSignature {
protected: protected_b64,
signature: sig_b64,
header: header,
};
Ok(Jws {
payload: payload_b64,
signatures: vec![jws_signature],
})
}
pub fn verify(&self, from_public: &[u8]) -> Result<bool> {
let protected_json = json!({"typ": TYP.to_owned(), "alg": ALG.to_owned()});
let protected_b64 = base64url::encode_str(&protected_json.to_string())?;
let payload_b64 = self.payload.clone();
let compact = format!("{protected_b64}.{payload_b64}");
let public_key: PublicKey = PublicKey::from_bytes(from_public)?;
let signature_bytes = self.signatures[0].get_signature_bytes()?;
let signature = Signature::from(signature_bytes);
public_key.verify(compact.as_bytes(), &signature)?;
Ok(true)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::json::didcomm::jwm::Jwm;
    use crate::json::didcomm::jwm::JwmBody;
    /// Round-trip: a JWS built with a fresh secret must verify against the
    /// matching public key.
    #[test]
    fn new_test() {
        let secret = EdSecret::new();
        let jwm = Jwm::new("from", "to", JwmBody::Message("body".to_owned()));
        let jws = Jws::new(Jpm::from(jwm), secret.clone()).unwrap();
        let r = jws.verify(&secret.to_publickey());
        assert!(r.is_ok());
    }
}
| 34.084337 | 83 | 0.630965 |
fbb1b798518dce365d0c6a79cc8888a81d0554ab | 1,491 | //! Scarlet is a library for making color, color spaces, and everything that comes with it simple to
//! work with. The underlying philosophy is that if all you have is a hammer, everything looks like a
//! nail: existing color libraries often only work with RGB or other convenient color spaces, and so
//! go to great lengths to invent complicated workarounds for the essential problems with RGB and its
//! ilk, namely not being very good analogues to the way humans actually see color. Scarlet makes
//! working with color convenient enough that it's *easier* to treat colors correctly than it is to do
//! anything else.
#![doc(html_root_url = "https://docs.rs/scarlet/1.0.2")]
// we don't mess around with documentation
#![deny(missing_docs)]
// Clippy doesn't like long decimals, but adding separators in decimals isn't any more readable
// compare -0.96924 with -0.96_924
#![allow(clippy::unreadable_literal)]
extern crate csv;
extern crate geo;
#[macro_use]
extern crate rulinalg;
extern crate num;
extern crate serde;
#[macro_use]
extern crate serde_derive;
// extern crate termion;
#[macro_use]
extern crate lazy_static;
pub mod bound;
pub mod color;
pub mod colormap;
pub mod colorpoint;
pub mod colors;
mod consts;
pub mod coord;
mod csscolor;
mod cssnumeric;
pub mod illuminants;
pub mod material_colors;
mod matplotlib_cmaps;
pub mod prelude;
mod visual_gamut;
// pub mod doc;
#[cfg(test)]
mod tests {
    /// Smoke test confirming the test harness itself runs.
    #[test]
    fn it_works() {
        let sum = 2 + 2;
        assert_eq!(sum, 4);
    }
}
| 29.235294 | 102 | 0.743796 |
1ae77abea01dd0bfd387bbeba65c2e5be7d03087 | 27,305 | //
// Copyright 2021 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{logger::Logger, lookup::LookupData};
use anyhow::Context;
use byteorder::{ByteOrder, LittleEndian};
use futures::future::FutureExt;
use log::Level;
use oak_functions_abi::proto::{OakStatus, Request, Response, ServerPolicy, StatusCode};
use serde::Deserialize;
use std::{collections::HashMap, convert::TryInto, str, sync::Arc, time::Duration};
use wasmi::ValueType;
// Names of the two functions every Oak Wasm module must export; their
// signatures are checked in `WasmState::new` via `check_export_function_signature`.
const MAIN_FUNCTION_NAME: &str = "main";
const ALLOC_FUNCTION_NAME: &str = "alloc";
/// Wasm host function index numbers for `wasmi` to map import names with. This numbering is not
/// exposed to the Wasm client. See https://docs.rs/wasmi/0.6.2/wasmi/trait.Externals.html
const READ_REQUEST: usize = 0;
const WRITE_RESPONSE: usize = 1;
const STORAGE_GET_ITEM: usize = 2;
const WRITE_LOG_MESSAGE: usize = 3;
// First index available to dynamically registered extensions; indices below it
// are reserved for the built-in host functions above. (The assignment of
// extension indices happens outside this view — confirm at the registration site.)
const EXTENSION_INDEX_OFFSET: usize = 10;
// Type aliases for positions and offsets in Wasm linear memory. Any future 64-bit version
// of Wasm would use different types.
pub type AbiPointer = u32;
pub type AbiPointerOffset = u32;
/// Wasm type identifier for position/offset values in linear memory. Any future 64-bit version of
/// Wasm would use a different value.
pub const ABI_USIZE: ValueType = ValueType::I32;
/// Minimum size of constant response bytes. It is large enough to fit an error response, in case
/// the policy is violated.
const MIN_RESPONSE_SIZE: u32 = 50;
/// Similar to [`ServerPolicy`], but it is used for reading the policy provided in the config,
/// and is therefore not guaranteed to be valid. Use [`Policy::validate`] to
/// turn it into a checked [`ServerPolicy`].
#[derive(Deserialize, Debug, Clone, Copy)]
#[serde(deny_unknown_fields)]
pub struct Policy {
    /// See [`Policy::constant_response_size_bytes`]
    pub constant_response_size_bytes: u32,
    /// A fixed response time. See [`ServerPolicy::constant_processing_time_ms`].
    // Deserialized via `humantime_serde`, i.e. human-readable duration strings.
    #[serde(with = "humantime_serde")]
    pub constant_processing_time: Duration,
}
impl Policy {
pub fn validate(&self) -> anyhow::Result<ServerPolicy> {
anyhow::ensure!(
self.constant_response_size_bytes >= MIN_RESPONSE_SIZE,
"Response size is too small",
);
Ok(ServerPolicy {
constant_response_size_bytes: self.constant_response_size_bytes,
constant_processing_time_ms: self
.constant_processing_time
.as_millis()
.try_into()
.context("could not convert milliseconds to u32")?,
})
}
}
/// Trait with a single function for padding the body of an object so that it could be serialized
/// into a byte array of a fixed size.
trait FixedSizeBodyPadder {
    /// Adds padding to the body of this instance to make the size of the body equal to `body_size`.
    ///
    /// Implementations must fail (not truncate) when the existing body already
    /// exceeds `body_size`.
    fn pad(&self, body_size: usize) -> anyhow::Result<Self>
    where
        Self: std::marker::Sized;
}
impl FixedSizeBodyPadder for Response {
    /// Creates a new [`Response`] with the same `status` as `self` whose body
    /// is padded with trailing zero bytes up to exactly `body_size`. The
    /// `length` field records the pre-padding body length so readers can
    /// recover the real payload.
    ///
    /// Fails if the current body is already longer than `body_size`.
    fn pad(&self, body_size: usize) -> anyhow::Result<Self> {
        if self.body.len() > body_size {
            anyhow::bail!("response body is larger than the input body_size")
        }
        // Remember the true payload length before appending the zero padding.
        let length = self.body.len() as u64;
        let mut body = self.body.clone();
        body.resize(body_size, 0);
        Ok(Response {
            status: self.status,
            body,
            length,
        })
    }
}
/// Trait for implementing extensions, to implement new native functionality.
pub trait OakApiNativeExtension {
    /// Similar to `invoke_index` in [`wasmi::Externals`], but may return a result to be
    /// written into the memory of the `WasmState`.
    ///
    /// The outer `Err(Trap)` aborts Wasm execution; the inner
    /// `Err(OakStatus)` is reported back to the Wasm module as a status code.
    fn invoke(
        &mut self,
        wasm_state: &mut WasmState,
        args: wasmi::RuntimeArgs,
    ) -> Result<Result<(), OakStatus>, wasmi::Trap>;
    /// Metadata about this Extension, including the exported host function name, and the function's
    /// signature.
    fn get_metadata(&self) -> (String, wasmi::Signature);
    /// Performs any cleanup or terminating behavior necessary before destroying the WasmState.
    fn terminate(&mut self) -> anyhow::Result<()>;
}
/// Factory producing a fresh extension instance (used per `WasmState`).
pub trait ExtensionFactory {
    fn create(&self) -> anyhow::Result<BoxedExtension>;
}
// Boxed, thread-safe trait objects used throughout the runtime.
pub type BoxedExtension = Box<dyn OakApiNativeExtension + Send + Sync>;
pub type BoxedExtensionFactory = Box<dyn ExtensionFactory + Send + Sync>;
/// `WasmState` holds runtime values for a particular execution instance of Wasm, handling a
/// single user request. The methods here correspond to the ABI host functions that allow the Wasm
/// module to exchange the request and the response with the Oak functions server. These functions
/// translate values between Wasm linear memory and Rust types.
pub struct WasmState {
    // Raw bytes of the incoming request, exposed to Wasm via `read_request`.
    request_bytes: Vec<u8>,
    // Bytes most recently written by the Wasm module via `write_response`.
    response_bytes: Vec<u8>,
    // Key/value store backing `storage_get_item`.
    lookup_data: Arc<LookupData>,
    // Instantiated module; populated by `WasmState::new`, `None` only during construction.
    instance: Option<wasmi::ModuleRef>,
    // The module's exported linear memory; populated by `WasmState::new`.
    memory: Option<wasmi::MemoryRef>,
    logger: Logger,
    /// A mapping of internal host functions to the corresponding [`OakApiNativeExtension`].
    // Wrapped in `Option` so it can be temporarily moved out in `invoke_index`.
    extensions_indices: Option<HashMap<usize, BoxedExtension>>,
    /// A mapping of host function names to metadata required for resolving the function.
    extensions_metadata: HashMap<String, (usize, wasmi::Signature)>,
}
impl WasmState {
    /// Helper function to get memory.
    ///
    /// Panics if called before `WasmState::new` attached the module's exported
    /// memory — an internal invariant, not a caller-visible error.
    pub fn get_memory(&self) -> &wasmi::MemoryRef {
        self.memory
            .as_ref()
            .expect("WasmState memory not attached!?")
    }
    /// Validates whether a given address range (inclusive) falls within the currently allocated
    /// range of guest memory.
    fn validate_range(&self, addr: AbiPointer, offset: AbiPointerOffset) -> Result<(), OakStatus> {
        let memory_size: wasmi::memory_units::Bytes = self.get_memory().current_size().into();
        // Check whether the end address is below or equal to the size of the guest memory.
        if wasmi::memory_units::Bytes((addr as usize) + (offset as usize)) <= memory_size {
            Ok(())
        } else {
            Err(OakStatus::ErrInvalidArgs)
        }
    }
    /// Copies `source` into guest memory at `dest`, after checking that the
    /// destination range lies inside the allocated guest memory.
    pub fn write_buffer_to_wasm_memory(
        &self,
        source: &[u8],
        dest: AbiPointer,
    ) -> Result<(), OakStatus> {
        self.validate_range(dest, source.len() as u32)?;
        self.get_memory().set(dest, source).map_err(|err| {
            self.logger.log_sensitive(
                Level::Error,
                &format!("Unable to write buffer into guest memory: {:?}", err),
            );
            OakStatus::ErrInvalidArgs
        })
    }
    /// Writes `value` into guest memory at `address` as 4 little-endian bytes.
    pub fn write_u32_to_wasm_memory(
        &self,
        value: u32,
        address: AbiPointer,
    ) -> Result<(), OakStatus> {
        let value_bytes = &mut [0; 4];
        LittleEndian::write_u32(value_bytes, value);
        self.get_memory().set(address, value_bytes).map_err(|err| {
            self.logger.log_sensitive(
                Level::Error,
                &format!("Unable to write u32 value into guest memory: {:?}", err),
            );
            OakStatus::ErrInvalidArgs
        })
    }
    /// Corresponds to the host ABI function [`read_request`](https://github.com/project-oak/oak/blob/main/docs/oak_functions_abi.md#read_request).
    ///
    /// Allocates guest memory via the module's exported `alloc`, copies the
    /// request bytes in, and writes the resulting pointer and length to the
    /// caller-provided out-parameters.
    pub fn read_request(
        &mut self,
        dest_ptr_ptr: AbiPointer,
        dest_len_ptr: AbiPointer,
    ) -> Result<(), OakStatus> {
        let dest_ptr = self.alloc(self.request_bytes.len() as u32);
        self.write_buffer_to_wasm_memory(&self.request_bytes, dest_ptr)?;
        self.write_u32_to_wasm_memory(dest_ptr, dest_ptr_ptr)?;
        self.write_u32_to_wasm_memory(self.request_bytes.len() as u32, dest_len_ptr)?;
        Ok(())
    }
    /// Corresponds to the host ABI function [`write_response`](https://github.com/project-oak/oak/blob/main/docs/oak_functions_abi.md#write_response).
    ///
    /// Copies the given guest-memory range into `response_bytes`; a later call
    /// overwrites an earlier one.
    pub fn write_response(
        &mut self,
        buf_ptr: AbiPointer,
        buf_len: AbiPointerOffset,
    ) -> Result<(), OakStatus> {
        let response = self
            .get_memory()
            .get(buf_ptr, buf_len as usize)
            .map_err(|err| {
                self.logger.log_sensitive(
                    Level::Error,
                    &format!(
                        "write_response(): Unable to read name from guest memory: {:?}",
                        err
                    ),
                );
                OakStatus::ErrInvalidArgs
            })?;
        self.response_bytes = response;
        Ok(())
    }
    /// Corresponds to the host ABI function [`write_log_message`](https://github.com/project-oak/oak/blob/main/docs/oak_functions_abi.md#write_log_message).
    ///
    /// The message must be valid UTF-8; it is emitted through the sensitive
    /// (debug-only) logging channel.
    pub fn write_log_message(
        &mut self,
        buf_ptr: AbiPointer,
        buf_len: AbiPointerOffset,
    ) -> Result<(), OakStatus> {
        let raw_log = self
            .get_memory()
            .get(buf_ptr, buf_len as usize)
            .map_err(|err| {
                self.logger.log_sensitive(
                    Level::Error,
                    &format!(
                        "write_log_message(): Unable to read message from guest memory: {:?}",
                        err
                    ),
                );
                OakStatus::ErrInvalidArgs
            })?;
        let log_message = str::from_utf8(raw_log.as_slice()).map_err(|err| {
            self.logger.log_sensitive(
                Level::Warn,
                &format!(
                    "write_log_message(): Not a valid UTF-8 encoded string: {:?}\nContent: {:?}",
                    err, raw_log
                ),
            );
            OakStatus::ErrInvalidArgs
        })?;
        self.logger
            .log_sensitive(Level::Debug, &format!("[Wasm] {}", log_message));
        Ok(())
    }
    /// Corresponds to the host ABI function [`storage_get_item`](https://github.com/project-oak/oak/blob/main/docs/oak_functions_abi.md#storage_get_item).
    ///
    /// Looks the key up in `lookup_data`; on a hit the value is copied into
    /// freshly `alloc`ed guest memory and its pointer/length written to the
    /// out-parameters. A miss yields `ErrStorageItemNotFound`.
    pub fn storage_get_item(
        &mut self,
        key_ptr: AbiPointer,
        key_len: AbiPointerOffset,
        value_ptr_ptr: AbiPointer,
        value_len_ptr: AbiPointer,
    ) -> Result<(), OakStatus> {
        let key = self
            .get_memory()
            .get(key_ptr, key_len as usize)
            .map_err(|err| {
                self.logger.log_sensitive(
                    Level::Error,
                    &format!(
                        "storage_get_item(): Unable to read key from guest memory: {:?}",
                        err
                    ),
                );
                OakStatus::ErrInvalidArgs
            })?;
        self.logger.log_sensitive(
            Level::Debug,
            &format!("storage_get_item(): key: {}", format_bytes(&key)),
        );
        match self.lookup_data.get(&key) {
            Some(value) => {
                // Truncate value for logging.
                let value_to_log = value.clone().into_iter().take(512).collect::<Vec<_>>();
                self.logger.log_sensitive(
                    Level::Debug,
                    &format!("storage_get_item(): value: {}", format_bytes(&value_to_log)),
                );
                let dest_ptr = self.alloc(value.len() as u32);
                self.write_buffer_to_wasm_memory(&value, dest_ptr)?;
                self.write_u32_to_wasm_memory(dest_ptr, value_ptr_ptr)?;
                self.write_u32_to_wasm_memory(value.len() as u32, value_len_ptr)?;
                Ok(())
            }
            None => {
                self.logger
                    .log_sensitive(Level::Debug, "storage_get_item(): value not found");
                Err(OakStatus::ErrStorageItemNotFound)
            }
        }
    }
    /// Calls the Wasm module's exported `alloc` to reserve `len` bytes of guest
    /// memory and returns the resulting pointer.
    ///
    /// Panics if the call traps or returns anything other than an `i32` —
    /// treated as a fatal bug in the loaded module.
    pub fn alloc(&mut self, len: u32) -> AbiPointer {
        let result = self.instance.as_ref().unwrap().invoke_export(
            ALLOC_FUNCTION_NAME,
            &[wasmi::RuntimeValue::I32(len as i32)],
            // When calling back into `alloc` we don't need to expose any of the rest of the ABI
            // methods.
            &mut wasmi::NopExternals,
        );
        let result_value = result
            .expect("`alloc` call failed")
            .expect("no value returned from `alloc`");
        match result_value {
            wasmi::RuntimeValue::I32(v) => v as u32,
            _ => panic!("invalid value type returned from `alloc`"),
        }
    }
}
impl wasmi::Externals for WasmState {
    /// Invocation of a host function specified by its registered index. Acts as a wrapper for
    /// the relevant native function, just:
    /// - checking argument types (which should be correct as `wasmi` will only pass through those
    ///   types that were specified when the host function was registered with `resolv_func`).
    /// - mapping resulting return/error values.
    fn invoke_index(
        &mut self,
        index: usize,
        args: wasmi::RuntimeArgs,
    ) -> Result<Option<wasmi::RuntimeValue>, wasmi::Trap> {
        match index {
            READ_REQUEST => {
                map_host_errors(self.read_request(args.nth_checked(0)?, args.nth_checked(1)?))
            }
            WRITE_RESPONSE => {
                map_host_errors(self.write_response(args.nth_checked(0)?, args.nth_checked(1)?))
            }
            WRITE_LOG_MESSAGE => {
                map_host_errors(self.write_log_message(args.nth_checked(0)?, args.nth_checked(1)?))
            }
            STORAGE_GET_ITEM => map_host_errors(self.storage_get_item(
                args.nth_checked(0)?,
                args.nth_checked(1)?,
                args.nth_checked(2)?,
                args.nth_checked(3)?,
            )),
            // Any other index belongs to a dynamically registered extension.
            _ => {
                // Temporarily move the extension map out of `self` so the
                // extension can receive `&mut WasmState` without aliasing it.
                let mut extensions_indices = self
                    .extensions_indices
                    .take()
                    .expect("no extensions_indices is set");
                let extension = match extensions_indices.get_mut(&index) {
                    Some(extension) => Box::new(extension),
                    None => panic!("Unimplemented function at {}", index),
                };
                let result = map_host_errors(extension.invoke(self, args)?);
                // Restore the map before returning so later calls can find it.
                self.extensions_indices = Some(extensions_indices);
                result
            }
        }
    }
}
impl wasmi::ModuleImportResolver for WasmState {
    /// Resolves a host-function import by name, checking that the signature the
    /// Wasm module expects matches the one registered on the host side.
    fn resolve_func(
        &self,
        field_name: &str,
        signature: &wasmi::Signature,
    ) -> Result<wasmi::FuncRef, wasmi::Error> {
        // First look for the function (i.e., `field_name`) in the statically registered functions.
        // If not found, then look for it among the extensions. If not found, return an error.
        let (index, expected_signature) = match oak_functions_resolve_func(field_name) {
            Some(sig) => sig,
            None => match self.extensions_metadata.get(field_name) {
                Some((ind, sig)) => (*ind, sig.clone()),
                None => {
                    return Err(wasmi::Error::Instantiation(format!(
                        "Export {} not found",
                        field_name
                    )))
                }
            },
        };
        if signature != &expected_signature {
            return Err(wasmi::Error::Instantiation(format!(
                "Export `{}` doesn't match expected signature; got: {:?}, expected: {:?}",
                field_name, signature, expected_signature
            )));
        }
        Ok(wasmi::FuncInstance::alloc_host(expected_signature, index))
    }
}
impl WasmState {
    /// Instantiates `module`, validates that it exports `main` and `alloc` with
    /// the expected signatures and a linear memory named `memory`, and returns
    /// a `WasmState` ready to serve one request.
    fn new(
        module: &wasmi::Module,
        request_bytes: Vec<u8>,
        lookup_data: Arc<LookupData>,
        logger: Logger,
        extensions_indices: HashMap<usize, BoxedExtension>,
        extensions_metadata: HashMap<String, (usize, wasmi::Signature)>,
    ) -> anyhow::Result<WasmState> {
        // Build the state first so it can act as the import resolver during
        // instantiation; `instance`/`memory` are filled in afterwards.
        let mut abi = WasmState {
            request_bytes,
            response_bytes: vec![],
            lookup_data,
            instance: None,
            memory: None,
            logger,
            extensions_indices: Some(extensions_indices),
            extensions_metadata,
        };
        let instance = wasmi::ModuleInstance::new(
            module,
            &wasmi::ImportsBuilder::new().with_resolver("oak_functions", &abi),
        )
        .map_err(|err| anyhow::anyhow!("failed to instantiate Wasm module: {:?}", err))?
        .assert_no_start();
        check_export_function_signature(
            &instance,
            MAIN_FUNCTION_NAME,
            &wasmi::Signature::new(&[][..], None),
        )
        .context("could not validate `main` export")?;
        check_export_function_signature(
            &instance,
            ALLOC_FUNCTION_NAME,
            &wasmi::Signature::new(&[ValueType::I32][..], Some(ValueType::I32)),
        )
        .context(" could not validate `alloc` export")?;
        abi.instance = Some(instance.clone());
        // Make sure that non-empty `memory` is attached to the WasmState. Fail early if
        // `memory` is not available.
        abi.memory = Some(
            instance
                .export_by_name("memory")
                .context("could not find Wasm `memory` export")?
                .as_memory()
                .cloned()
                .context("could not interpret Wasm `memory` export as memory")?,
        );
        Ok(abi)
    }
    /// Runs the module's exported `main`; the outcome (including traps) is only
    /// logged — the response is whatever the module wrote via `write_response`.
    fn invoke(&mut self) {
        let instance = self.instance.as_ref().expect("no instance").clone();
        let result = instance.invoke_export(MAIN_FUNCTION_NAME, &[], self);
        self.logger.log_sensitive(
            Level::Info,
            &format!(
                "{:?}: Running Wasm module completed with result: {:?}",
                std::thread::current().id(),
                result
            ),
        );
    }
    /// Returns a copy of the bytes the module wrote via `write_response`.
    fn get_response_bytes(&self) -> Vec<u8> {
        self.response_bytes.clone()
    }
}
/// Verifies that `instance` exports a function named `export_name` whose
/// signature is exactly `expected_signature`.
fn check_export_function_signature(
    instance: &wasmi::ModuleInstance,
    export_name: &str,
    expected_signature: &wasmi::Signature,
) -> anyhow::Result<()> {
    let export = instance
        .export_by_name(export_name)
        .context("could not find Wasm export")?;
    let export_function = export
        .as_func()
        .cloned()
        .context("could not interpret Wasm export as function")?;
    if export_function.signature() == expected_signature {
        Ok(())
    } else {
        anyhow::bail!(
            "invalid signature for export: {:?}, expected: {:?}",
            export_function.signature(),
            expected_signature
        )
    }
}
/// Runs the given function and applies the given security policy to the execution of the function
/// and the response returned from it. Serializes and returns the response as a binary
/// protobuf-encoded byte array of a constant size.
///
/// If the execution of the `function` takes longer than allowed by the given security policy,
/// an error response with status `PolicyTimeViolation` is returned. If the size of the `body` in
/// the response returned by the `function` is larger than allowed by the security policy, the
/// response is discarded and a response with status `PolicySizeViolation` is returned instead.
/// In all cases, to keep the total size of the returned byte array constant, the `body` of the
/// response may be padded by a number of trailing 0s before encoding the response as a binary
/// protobuf message. In this case, the `length` in the response will contain the effective length
/// of the `body`. This response is guaranteed to comply with the policy's size restriction.
pub async fn apply_policy<F, S>(policy: ServerPolicy, function: F) -> anyhow::Result<Response>
where
    F: std::marker::Send + 'static + FnOnce() -> S,
    S: std::future::Future<Output = anyhow::Result<Response>> + std::marker::Send,
{
    // Use tokio::spawn to actually run the tasks in parallel, for more accurate measurement
    // of time.
    let task = tokio::spawn(async move { function().await });
    // Sleep until the policy times out
    tokio::time::sleep(Duration::from_millis(
        policy.constant_processing_time_ms.into(),
    ))
    .await;
    // `now_or_never` polls the join handle exactly once: if the task has not
    // finished within the constant processing time, it yields `None`.
    let function_response = task.now_or_never();
    let response = match function_response {
        // The `function` did not terminate within the policy timeout
        None => Response::create(
            StatusCode::PolicyTimeViolation,
            "Reason: response not available.".as_bytes().to_vec(),
        ),
        Some(response) => match response {
            // `tokio::task::JoinError` when getting the response from the tokio task
            Err(_tokio_err) => Response::create(
                StatusCode::InternalServerError,
                "Reason: internal server error.".as_bytes().to_vec(),
            ),
            Ok(response) => match response {
                // The `function` terminated with an error
                Err(err) => Response::create(
                    StatusCode::InternalServerError,
                    err.to_string().as_bytes().to_vec(),
                ),
                Ok(rsp) => rsp,
            },
        },
    };
    // Return an error response if the body of the response is larger than allowed by the policy.
    let response = if response.body.len() > policy.constant_response_size_bytes as usize {
        Response::create(
            StatusCode::PolicySizeViolation,
            "Reason: the response is too large.".as_bytes().to_vec(),
        )
    } else {
        response
    };
    // Pad the (possibly replaced) response up to the constant size so the
    // returned byte array length leaks nothing about the real body.
    response.pad(
        policy
            .constant_response_size_bytes
            .try_into()
            .context("could not convert u64 to usize")?,
    )
}
// An ephemeral request handler with a Wasm module for handling the requests.
#[derive(Clone)]
pub struct WasmHandler {
    // Wasm module to be served on each invocation. `Arc` is needed to make `WasmHandler`
    // cloneable.
    module: Arc<wasmi::Module>,
    // Shared lookup data handed to each per-request `WasmState` (see `handle_invoke`).
    lookup_data: Arc<LookupData>,
    // Factories used to create a fresh set of extensions for every invocation.
    extension_factories: Arc<Vec<BoxedExtensionFactory>>,
    // Logger cloned into each per-request `WasmState`.
    logger: Logger,
}
impl WasmHandler {
    /// Builds a `WasmHandler` from raw Wasm module bytes plus the shared
    /// lookup data, extension factories and logger.
    pub fn create(
        wasm_module_bytes: &[u8],
        lookup_data: Arc<LookupData>,
        extension_factories: Vec<BoxedExtensionFactory>,
        logger: Logger,
    ) -> anyhow::Result<Self> {
        let module = wasmi::Module::from_buffer(&wasm_module_bytes)
            .map_err(|err| anyhow::anyhow!("could not load module from buffer: {:?}", err))?;
        Ok(WasmHandler {
            module: Arc::new(module),
            lookup_data,
            extension_factories: Arc::new(extension_factories),
            logger,
        })
    }
    /// Handles one request: creates a fresh extension set and `WasmState`,
    /// runs the module's `main`, terminates the extensions, and returns the
    /// bytes the module wrote as a successful `Response`.
    pub async fn handle_invoke(&self, request: Request) -> anyhow::Result<Response> {
        let mut extensions_indices = HashMap::new();
        let mut extensions_metadata = HashMap::new();
        for (position, factory) in self.extension_factories.iter().enumerate() {
            let index = position + EXTENSION_INDEX_OFFSET;
            let extension = factory.create()?;
            let (name, signature) = extension.get_metadata();
            extensions_indices.insert(index, extension);
            extensions_metadata.insert(name, (index, signature));
        }
        let mut wasm_state = WasmState::new(
            &self.module,
            request.body,
            self.lookup_data.clone(),
            self.logger.clone(),
            extensions_indices,
            extensions_metadata,
        )?;
        wasm_state.invoke();
        let extensions = wasm_state
            .extensions_indices
            .take()
            .expect("no extensions_indices is set in wasm_state");
        for mut extension in extensions.into_values() {
            extension.terminate()?;
        }
        Ok(Response::create(
            StatusCode::Success,
            wasm_state.get_response_bytes(),
        ))
    }
}
/// A resolver function, mapping `oak_functions` host function names to an index and a type
/// signature.
fn oak_functions_resolve_func(field_name: &str) -> Option<(usize, wasmi::Signature)> {
    // The types in the signatures correspond to the parameters from
    // oak_functions/abi/src/lib.rs; every host call returns an i32 status.
    match field_name {
        // (buf_ptr_ptr, buf_len_ptr) -> status
        "read_request" => Some((
            READ_REQUEST,
            wasmi::Signature::new(&[ABI_USIZE, ABI_USIZE][..], Some(ValueType::I32)),
        )),
        // (buf_ptr, buf_len) -> status
        "write_response" => Some((
            WRITE_RESPONSE,
            wasmi::Signature::new(&[ABI_USIZE, ABI_USIZE][..], Some(ValueType::I32)),
        )),
        // (buf_ptr, buf_len) -> status
        "write_log_message" => Some((
            WRITE_LOG_MESSAGE,
            wasmi::Signature::new(&[ABI_USIZE, ABI_USIZE][..], Some(ValueType::I32)),
        )),
        // (key_ptr, key_len, value_ptr_ptr, value_len_ptr) -> status
        "storage_get_item" => Some((
            STORAGE_GET_ITEM,
            wasmi::Signature::new(
                &[ABI_USIZE, ABI_USIZE, ABI_USIZE, ABI_USIZE][..],
                Some(ValueType::I32),
            ),
        )),
        _ => None,
    }
}
/// Bridges our host-call result type `Result<(), OakStatus>` into the `wasmi`
/// return convention `Result<Option<wasmi::RuntimeValue>, wasmi::Trap>`: both
/// `Ok(())` and `Err(status)` become an ordinary `i32` return value carrying
/// the `OakStatus` numeric code — a host error never becomes a trap.
fn map_host_errors(
    result: Result<(), OakStatus>,
) -> Result<Option<wasmi::RuntimeValue>, wasmi::Trap> {
    let status = match result {
        Ok(()) => OakStatus::Ok,
        Err(status) => status,
    };
    Ok(Some(wasmi::RuntimeValue::I32(status as i32)))
}
/// Renders `v` as text: the UTF-8 decoding when `v` is valid UTF-8, otherwise
/// the `Debug` formatting of the byte slice (e.g. `[255, 254]`).
pub fn format_bytes(v: &[u8]) -> String {
    match std::str::from_utf8(v) {
        Ok(s) => s.to_owned(),
        Err(_) => format!("{:?}", v),
    }
}
| 38.082287 | 157 | 0.587548 |
bf0913ca8537f6921d50292f259cde203f7ba20c | 1,445 | use core::fmt;
use core::ops::{Deref, DerefMut};
#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
// Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache
// lines at a time, so we have to align to 128 bytes rather than 64.
//
// Sources:
// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
/// Aligns the wrapped value to a cache-line boundary (via the `repr(align)`
/// attributes above) so adjacent values do not share a cache line.
pub(crate) struct CachePadded<T> {
    value: T,
}
// SAFETY: `CachePadded<T>` stores nothing but a single `T` (the alignment
// attributes add padding only), so it may move between threads whenever `T`
// itself is `Send`.
unsafe impl<T: Send> Send for CachePadded<T> {}
// SAFETY: a shared `&CachePadded<T>` exposes nothing beyond `&T` (see the
// `Deref` impl below), so `Sync` follows directly from `T: Sync`.
unsafe impl<T: Sync> Sync for CachePadded<T> {}
impl<T> CachePadded<T> {
    /// Wraps `t` in a cache-line-aligned container.
    pub(crate) fn new(t: T) -> CachePadded<T> {
        CachePadded { value: t }
    }
}
/// Shared access to the wrapped value.
impl<T> Deref for CachePadded<T> {
    type Target = T;
    fn deref(&self) -> &T {
        &self.value
    }
}
/// Mutable access to the wrapped value.
impl<T> DerefMut for CachePadded<T> {
    fn deref_mut(&mut self) -> &mut T {
        &mut self.value
    }
}
/// Debug-formats as `CachePadded { value: … }`.
impl<T: fmt::Debug> fmt::Debug for CachePadded<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("CachePadded")
            .field("value", &self.value)
            .finish()
    }
}
impl<T> From<T> for CachePadded<T> {
    fn from(t: T) -> Self {
        CachePadded::new(t)
    }
}
| 27.264151 | 120 | 0.63391 |
721488b823d390b585779f84b407af573988d335 | 2,228 | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
pub mod emitter; // emitter is public API for mutating state
pub mod iterator;
use rx_rust as rx;
extern crate bitflags;
use bitflags::bitflags;
bitflags! {
    /// Bit flags describing the current emitter scope; stored in `Env::flags`.
    /// NOTE(review): flag semantics inferred from names only — confirm against
    /// the OCaml `Emit_env` source this is ported from.
    pub struct Flags: u8 {
        const NEEDS_LOCAL_THIS = 0b0000_0001;
        const IN_TRY = 0b0000_0010;
        const ALLOWS_ARRAY_APPEND = 0b0000_0100;
        const IN_RX_BODY = 0b0000_1000;
    }
}
/// Per-scope emitter environment; currently only carries the state flags.
pub struct Env {
    /// Scope state bits (see `Flags`).
    pub flags: Flags,
    // TODO(hrust)
    // - pipe_var after porting Local
    // - namespace after porting Namespace_env
    // - jump_targets after porting Jump_targets
}
/// Builder for creating unique ids; e.g.
/// the OCaml function
///     Emit_env.get_unique_id_for_FOO
/// can be called in Rust via:
///
/// ```text
/// UniqueIdBuilder::new().FOO("some_fun")
/// ```
///
/// (The example is fenced as `text` because `FOO` is a placeholder — as a
/// Rust doc-test it would fail to compile.)
pub struct UniqueIdBuilder {
    id: String,
}
impl UniqueIdBuilder {
    /// Starts an id; `|` separates the id's components.
    pub fn new() -> UniqueIdBuilder {
        UniqueIdBuilder { id: "|".to_owned() }
    }
    /// Unique id for the top-level (main) pseudo-function: `"|"`.
    pub fn main(self) -> String {
        self.id
    }
    /// Unique id for a top-level function: `"|<fun_name>"`.
    pub fn function(mut self, fun_name: &str) -> String {
        self.id.push_str(fun_name);
        self.id
    }
    /// Unique id for a method: `"<class_name>|<meth_name>"`.
    pub fn method(self, class_name: &str, meth_name: &str) -> String {
        let mut ret = class_name.to_owned();
        ret.push_str(&self.id);
        ret.push_str(meth_name);
        ret
    }
}
/// Ordered map keyed by `String` (named after OCaml's `SMap`, which this
/// module is ported from per the TODO(hrust) notes).
pub type SMap<T> = std::collections::BTreeMap<String, T>;
/// Ordered set of `String`s (OCaml `SSet` counterpart).
pub type SSet = std::collections::BTreeSet<String>;
#[derive(Default, Debug)]
/// Emitter-wide state shared across scopes.
pub struct GlobalState {
    // Presumably the set of closures with an explicit `use` clause — confirm
    // against the OCaml source.
    pub explicit_use_set: SSet,
    // closure_namespaces: SMap<NamespaceEnv>, // TODO(hrust) use oxidized
    // closure_enclosing_classes // TODO(hrust) need Tast
    // Keyed by the unique ids produced by `UniqueIdBuilder`.
    pub function_to_labels_map: SMap<SMap<bool>>,
    // Reactivity level per method unique id; read by `get_lambda_rx_of_scope`.
    pub lambda_rx_of_scope: SMap<rx::Level>,
}
impl GlobalState {
    /// Returns the reactivity level recorded for the lambda scope of
    /// `class_name::meth_name`, defaulting to `NonRx` when none was recorded.
    pub fn get_lambda_rx_of_scope(&self, class_name: &str, meth_name: &str) -> rx::Level {
        let key = UniqueIdBuilder::new().method(class_name, meth_name);
        self.lambda_rx_of_scope
            .get(&key)
            .copied()
            .unwrap_or(rx::Level::NonRx)
    }
}
| 27.506173 | 90 | 0.638241 |
91716f93265fd63e9f401919aaa01ea317721886 | 7,316 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Contains support for parsing and writing Ethernet frames. Does not currently offer support for
//! 802.1Q tags.
use std::result::Result;
use super::bytes::{InnerBytes, NetworkBytes, NetworkBytesMut};
use super::Incomplete;
use crate::MacAddr;
const DST_MAC_OFFSET: usize = 0;
const SRC_MAC_OFFSET: usize = 6;
const ETHERTYPE_OFFSET: usize = 12;
// We don't support 802.1Q tags.
// TODO: support 802.1Q tags?! If so, don't forget to change the speculative_test_* functions
// for ARP and IPv4.
/// Payload offset in an ethernet frame
pub const PAYLOAD_OFFSET: usize = 14;
/// Ethertype value for ARP frames.
pub const ETHERTYPE_ARP: u16 = 0x0806;
/// Ethertype value for IPv4 packets.
pub const ETHERTYPE_IPV4: u16 = 0x0800;
/// Describes the errors which may occur when handling Ethernet frames.
/// Describes the errors which may occur when handling Ethernet frames.
#[derive(Debug, PartialEq)]
pub enum Error {
    /// The specified byte sequence is shorter than the Ethernet header length.
    SliceTooShort,
}
/// Interprets the inner bytes as an Ethernet frame.
pub struct EthernetFrame<'a, T: 'a> {
    // The wrapped byte buffer; header fields live at the fixed offsets
    // declared above (`DST_MAC_OFFSET`, `SRC_MAC_OFFSET`, `ETHERTYPE_OFFSET`).
    bytes: InnerBytes<'a, T>,
}
#[allow(clippy::len_without_is_empty)]
impl<'a, T: NetworkBytes> EthernetFrame<'a, T> {
/// Interprets `bytes` as an Ethernet frame without any validity checks.
///
/// # Panics
///
/// This method does not panic, but further method calls on the resulting object may panic if
/// `bytes` contains invalid input.
#[inline]
pub fn from_bytes_unchecked(bytes: T) -> Self {
EthernetFrame {
bytes: InnerBytes::new(bytes),
}
}
/// Checks whether the specified byte sequence can be interpreted as an Ethernet frame.
#[inline]
pub fn from_bytes(bytes: T) -> Result<Self, Error> {
if bytes.len() < PAYLOAD_OFFSET {
return Err(Error::SliceTooShort);
}
Ok(EthernetFrame::from_bytes_unchecked(bytes))
}
/// Returns the destination MAC address.
#[inline]
pub fn dst_mac(&self) -> MacAddr {
MacAddr::from_bytes_unchecked(&self.bytes[DST_MAC_OFFSET..SRC_MAC_OFFSET])
}
/// Returns the source MAC address.
#[inline]
pub fn src_mac(&self) -> MacAddr {
MacAddr::from_bytes_unchecked(&self.bytes[SRC_MAC_OFFSET..ETHERTYPE_OFFSET])
}
/// Returns the ethertype of the frame.
#[inline]
pub fn ethertype(&self) -> u16 {
self.bytes.ntohs_unchecked(ETHERTYPE_OFFSET)
}
/// Returns the offset of the payload within the frame.
#[inline]
pub fn payload_offset(&self) -> usize {
PAYLOAD_OFFSET
}
/// Returns the payload of the frame as an `[&u8]` slice.
#[inline]
pub fn payload(&self) -> &[u8] {
self.bytes.split_at(self.payload_offset()).1
}
/// Returns the length of the frame.
#[inline]
pub fn len(&self) -> usize {
self.bytes.len()
}
}
impl<'a, T: NetworkBytesMut> EthernetFrame<'a, T> {
    /// Writes an Ethernet header with the given fields into `buf` and returns
    /// the resulting frame, or `SliceTooShort` if `buf` cannot hold a header.
    fn new_with_header(
        buf: T,
        dst_mac: MacAddr,
        src_mac: MacAddr,
        ethertype: u16,
    ) -> Result<Self, Error> {
        if buf.len() < PAYLOAD_OFFSET {
            return Err(Error::SliceTooShort);
        }
        let mut frame = EthernetFrame::from_bytes_unchecked(buf);
        frame.set_dst_mac(dst_mac);
        frame.set_src_mac(src_mac);
        frame.set_ethertype(ethertype);
        Ok(frame)
    }
    /// Writes an incomplete Ethernet frame (whose final length is not yet
    /// known) to `buf`, using the specified header fields.
    #[inline]
    pub fn write_incomplete(
        buf: T,
        dst_mac: MacAddr,
        src_mac: MacAddr,
        ethertype: u16,
    ) -> Result<Incomplete<Self>, Error> {
        let frame = Self::new_with_header(buf, dst_mac, src_mac, ethertype)?;
        Ok(Incomplete::new(frame))
    }
    /// Sets the destination MAC address.
    #[inline]
    pub fn set_dst_mac(&mut self, addr: MacAddr) -> &mut Self {
        self.bytes[DST_MAC_OFFSET..SRC_MAC_OFFSET].copy_from_slice(addr.get_bytes());
        self
    }
    /// Sets the source MAC address.
    #[inline]
    pub fn set_src_mac(&mut self, addr: MacAddr) -> &mut Self {
        self.bytes[SRC_MAC_OFFSET..ETHERTYPE_OFFSET].copy_from_slice(addr.get_bytes());
        self
    }
    /// Sets the ethertype of the frame.
    #[inline]
    pub fn set_ethertype(&mut self, value: u16) -> &mut Self {
        self.bytes.htons_unchecked(ETHERTYPE_OFFSET, value);
        self
    }
    /// Payload of the frame as a mutable byte slice.
    #[inline]
    pub fn payload_mut(&mut self) -> &mut [u8] {
        // Compute the offset before the mutable split to keep the borrow
        // checker happy.
        let offset = self.payload_offset();
        self.bytes.split_at_mut(offset).1
    }
}
impl<'a, T: NetworkBytes> Incomplete<EthernetFrame<'a, T>> {
    /// Completes the inner frame by shrinking it to its actual length
    /// (header plus `payload_len` payload bytes).
    ///
    /// # Panics
    ///
    /// This method panics if `payload_offset() + payload_len` is greater than
    /// the length of the inner byte sequence.
    #[inline]
    pub fn with_payload_len_unchecked(mut self, payload_len: usize) -> EthernetFrame<'a, T> {
        let payload_offset = self.inner.payload_offset();
        self.inner
            .bytes
            .shrink_unchecked(payload_offset + payload_len);
        self.inner
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::fmt;
    // `unwrap_err`/`assert_eq!` below need the frame to be `Debug`; a dummy
    // impl is enough for test output.
    impl<'a, T: NetworkBytes> fmt::Debug for EthernetFrame<'a, T> {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            write!(f, "(Ethernet frame)")
        }
    }
    #[test]
    fn test_ethernet_frame() {
        let mut a = [0u8; 10000];
        let mut bad_array = [0u8; 1];
        let dst_mac = MacAddr::parse_str("01:23:45:67:89:ab").unwrap();
        let src_mac = MacAddr::parse_str("cd:ef:01:23:45:67").unwrap();
        let ethertype = 1289;
        // A one-byte buffer is too short for both reading and writing.
        assert_eq!(
            EthernetFrame::from_bytes(bad_array.as_ref()).unwrap_err(),
            Error::SliceTooShort
        );
        assert_eq!(
            EthernetFrame::new_with_header(bad_array.as_mut(), dst_mac, src_mac, ethertype)
                .unwrap_err(),
            Error::SliceTooShort
        );
        // Write a header + one payload byte through the mutable view...
        {
            let mut f1 =
                EthernetFrame::new_with_header(a.as_mut(), dst_mac, src_mac, ethertype).unwrap();
            assert_eq!(f1.dst_mac(), dst_mac);
            assert_eq!(f1.src_mac(), src_mac);
            assert_eq!(f1.ethertype(), ethertype);
            f1.payload_mut()[1] = 132;
        }
        // ...and confirm a fresh read-only view observes the same data.
        {
            let f2 = EthernetFrame::from_bytes(a.as_ref()).unwrap();
            assert_eq!(f2.dst_mac(), dst_mac);
            assert_eq!(f2.src_mac(), src_mac);
            assert_eq!(f2.ethertype(), ethertype);
            assert_eq!(f2.payload()[1], 132);
            assert_eq!(f2.len(), f2.bytes.len());
        }
        // Completing an incomplete frame shrinks it to header + payload_len.
        {
            let f3 =
                EthernetFrame::write_incomplete(a.as_mut(), dst_mac, src_mac, ethertype).unwrap();
            let f3_complete = f3.with_payload_len_unchecked(123);
            assert_eq!(f3_complete.len(), f3_complete.payload_offset() + 123);
        }
    }
}
616a26fdc83d8bb88ad9df8f52df665f991914fe | 2,808 | use crate::{lexer::Token, SyntaxKind, SyntaxKind::EOF, TextRange, TextUnit};
use std::ops::{Add, AddAssign};
/// Parser-facing view of the token stream: the non-trivia tokens plus each
/// token's start offset, backed by the original source text.
pub(crate) struct ParserInput<'t> {
    text: &'t str,
    /// start position of each token(expect whitespace and comment)
    /// ```non-rust
    /// struct Foo;
    ///  ^------^---
    ///  |      |  ^-
    ///  0      7  10
    /// ```
    /// (token, start_offset): `[(struct, 0), (Foo, 7), (;, 10)]`
    start_offsets: Vec<TextUnit>,
    /// non-whitespace/comment tokens
    /// ```non-rust
    /// struct Foo {}
    /// ^^^^^^ ^^^ ^^
    /// ```
    /// tokens: `[struct, Foo, {, }]`
    tokens: Vec<Token>,
}
impl<'t> ParserInput<'t> {
    /// Builds parser input from `raw_tokens`, dropping trivia (whitespace and
    /// comments) while recording each kept token's start offset; the running
    /// offset still advances over trivia so positions stay source-accurate.
    pub fn new(text: &'t str, raw_tokens: &'t [Token]) -> ParserInput<'t> {
        let mut tokens = Vec::new();
        let mut start_offsets = Vec::new();
        let mut offset = 0.into();
        for &token in raw_tokens {
            if !token.kind.is_trivia() {
                start_offsets.push(offset);
                tokens.push(token);
            }
            offset += token.len;
        }
        ParserInput { text, start_offsets, tokens }
    }
    /// Syntax kind of the token at `pos`, or `EOF` past the end.
    pub fn kind(&self, pos: InputPosition) -> SyntaxKind {
        self.tokens
            .get(pos.0 as usize)
            .map_or(EOF, |token| token.kind)
    }
    /// Length of the token at `pos`, or zero past the end.
    pub fn token_len(&self, pos: InputPosition) -> TextUnit {
        self.tokens
            .get(pos.0 as usize)
            .map_or_else(|| 0.into(), |token| token.len)
    }
    /// Start offset of the token at `pos`, or zero past the end.
    pub fn token_start_at(&self, pos: InputPosition) -> TextUnit {
        self.start_offsets
            .get(pos.0 as usize)
            .copied()
            .unwrap_or_else(|| 0.into())
    }
    /// Raw source text of the token at `pos`, or `""` past the end.
    pub fn token_text(&self, pos: InputPosition) -> &'t str {
        let idx = pos.0 as usize;
        match self.tokens.get(idx) {
            Some(token) => {
                let range = TextRange::offset_len(self.start_offsets[idx], token.len);
                &self.text[range]
            }
            None => "",
        }
    }
}
/// Index of a (non-trivia) token within `ParserInput`.
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
pub(crate) struct InputPosition(u32);
impl InputPosition {
    /// Position of the first token.
    pub fn new() -> Self {
        InputPosition(0)
    }
}
/// Advance a position by `rhs` tokens, yielding a new position.
impl Add<u32> for InputPosition {
    type Output = InputPosition;
    fn add(self, rhs: u32) -> InputPosition {
        InputPosition(self.0 + rhs)
    }
}
/// Advance a position by `rhs` tokens in place.
impl AddAssign<u32> for InputPosition {
    fn add_assign(&mut self, rhs: u32) {
        self.0 += rhs
    }
}
| 27.529412 | 89 | 0.540598 |
1c49def1330c884da8169e2c97613a8b46d78f8e | 717 | // Copyright (C) 2020 ADVANCA PTE. LTD.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use sgx_types::*;
extern "C" {
} | 39.833333 | 73 | 0.743375 |
9b8262b720b93214d518521a954dce3f52d082d2 | 4,565 | use std::sync::Arc;
use crate::context::ParserContext;
use crate::io::{Reader, Span};
use crate::parsers::commons::identifier::Identifier;
use crate::parsers::commons::whitespaces::Whitespace;
use crate::parsers::expressions::Expression;
use crate::parsers::result::ParserResult;
use crate::parsers::utils::{cursor_manager, generate_error_log, generate_source_code};
use crate::parsers::ParserResultError;
use crate::{ParserError, ParserNode};
// Keyword that introduces the statement.
static KEYWORD: &str = "return";
/// A return statement with a compulsory expression.
#[derive(Debug)]
pub struct ReturnStatement {
    // Source span covering the whole statement, from the keyword through the
    // expression (see `parse`, which substrings from the initial cursor).
    span: Arc<Span>,
    // The returned expression (always present).
    expression: Arc<Expression>,
    // Whitespace between the keyword and the expression.
    pre_expression_whitespace: Arc<Whitespace>,
}
impl ReturnStatement {
    // GETTERS ----------------------------------------------------------------
    /// The returned expression.
    pub fn expression(&self) -> &Expression {
        &self.expression
    }
    /// Whitespace between the `return` keyword and the expression.
    pub fn pre_expression_whitespace(&self) -> &Arc<Whitespace> {
        &self.pre_expression_whitespace
    }
    // STATIC METHODS ---------------------------------------------------------
    /// Parses a return statement.
    ///
    /// Returns `NotFound` (cursor restored by `cursor_manager`) when the input
    /// does not start with the `return` keyword, and `Error` — after emitting
    /// a `MissingExpressionInReturnStatement` diagnostic — when the keyword is
    /// present but no expression follows.
    pub fn parse(
        reader: &mut Reader,
        context: &mut ParserContext,
    ) -> ParserResult<ReturnStatement> {
        cursor_manager(reader, |reader, init_cursor| {
            if !Identifier::parse_keyword(reader, context, KEYWORD) {
                return Err(ParserResultError::NotFound);
            }
            let pre_expression_whitespace = Whitespace::parse_multiline_or_default(reader, context);
            let expression = match Expression::parse(reader, context) {
                Ok(v) => v,
                Err(_) => {
                    // Report the missing expression at the current cursor so
                    // the log highlights where one should be inserted.
                    context.add_message(generate_error_log(
                        ParserError::MissingExpressionInReturnStatement,
                        "An expression was expected to specify the value to return".to_string(),
                        |log| {
                            generate_source_code(log, &reader, |doc| {
                                doc.highlight_cursor_str(
                                    reader.byte_offset(),
                                    Some("Insert an expression here"),
                                    None,
                                )
                            })
                        },
                    ));
                    return Err(ParserResultError::Error);
                }
            };
            // The span covers everything consumed since the initial cursor.
            let span = Arc::new(reader.substring_to_current(&init_cursor));
            Ok(ReturnStatement {
                span,
                expression: Arc::new(expression),
                pre_expression_whitespace: Arc::new(pre_expression_whitespace),
            })
        })
    }
}
impl ParserNode for ReturnStatement {
    /// Full source span of the parsed statement.
    fn span(&self) -> &Arc<Span> {
        &self.span
    }
}
// ----------------------------------------------------------------------------
// ----------------------------------------------------------------------------
// ----------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use crate::test::{assert_error, assert_not_found};
    use crate::ParserError;
    use super::*;
    // Happy path: `return <identifier>` parses into a variable-access expression.
    #[test]
    fn test_parse() {
        // With whitespaces.
        let mut reader = Reader::from_str("return test");
        let mut context = ParserContext::default();
        let statement =
            ReturnStatement::parse(&mut reader, &mut context).expect("The parser must succeed");
        if let Expression::VariableAccess(identifier) = statement.expression.as_ref() {
            assert_eq!(
                identifier.content(),
                "test",
                "The literal access is incorrect"
            );
        } else {
            panic!("The literal is incorrect");
        }
    }
    // Input that does not start with `return` must yield `NotFound`.
    #[test]
    fn test_parse_err_not_found() {
        let mut reader = Reader::from_str("-");
        let mut context = ParserContext::default();
        let error = ReturnStatement::parse(&mut reader, &mut context)
            .expect_err("The parser must not succeed");
        assert_not_found(&context, &error, 0);
    }
    // `return` with no expression must emit the dedicated diagnostic.
    #[test]
    fn test_parse_err_missing_expression() {
        let mut reader = Reader::from_str("return");
        let mut context = ParserContext::default();
        let error = ReturnStatement::parse(&mut reader, &mut context)
            .expect_err("The parser must not succeed");
        assert_error(
            &context,
            &error,
            ParserError::MissingExpressionInReturnStatement,
        );
    }
}
| 32.841727 | 100 | 0.518072 |
d98951697111cfcaef7bb7c3bf1a0c8e38aac0d2 | 10,405 | use crate::rendering::*;
use crate::{EmeraldError, Sound, SoundKey};
use miniquad::Context;
use std::collections::HashMap;
// Initial capacities for the texture/font vectors (growth hints only).
const INITIAL_TEXTURE_STORAGE_CAPACITY: usize = 100;
const INITIAL_FONT_STORAGE_CAPACITY: usize = 100;
// Default root prepended to relative asset paths.
const DEFAULT_ASSET_FOLDER: &str = "./assets/";
/// Default to storing user data in the application directory.
/// Note: This will destroy any user/save files if the game is re-installed.
const DEFAULT_USER_DATA_FOLDER: &str = "./";
// const INITIAL_SOUND_STORAGE_CAPACITY: usize = 100;
/// The AssetStore stores all Textures, Fonts, and Audio for the game.
/// It stores the data contiguously, and does caching internally.
/// Assets can be loaded via the `AssetLoader` and inserted into the AssetStore.
/// Assets can be manually removed from the store if memory management becomes a concern.
pub(crate) struct AssetStore {
    // Raw file bytes cached by full (root-prefixed) path.
    bytes: HashMap<String, Vec<u8>>,
    fonts: Vec<Font>,
    fontdue_fonts: Vec<fontdue::Font>,
    textures: Vec<Texture>,
    // Each `*_key_map` maps a key to an index into the matching vector above;
    // removing from a vector invalidates later indices, so the map must be
    // rebuilt on removal (see `remove_texture`).
    fontdue_key_map: HashMap<FontKey, usize>,
    font_key_map: HashMap<FontKey, usize>,
    pub texture_key_map: HashMap<TextureKey, usize>,
    pub sound_map: HashMap<SoundKey, Sound>,
    asset_folder_root: String,
    user_data_folder_root: String,
    #[cfg(feature="hotreload")]
    pub(crate) file_hot_reload_metadata: HashMap<String, crate::assets::hotreload::HotReloadMetadata>,
}
impl AssetStore {
    /// Creates an empty store whose texture slot 0 holds the default texture,
    /// keyed by `TextureKey::default()`.
    pub fn new(ctx: &mut Context) -> Self {
        let mut texture_key_map = HashMap::new();
        texture_key_map.insert(TextureKey::default(), 0);
        let mut textures = Vec::with_capacity(INITIAL_TEXTURE_STORAGE_CAPACITY);
        textures.push(Texture::default(ctx).unwrap());
        AssetStore {
            bytes: HashMap::new(),
            fontdue_fonts: Vec::with_capacity(INITIAL_FONT_STORAGE_CAPACITY),
            fonts: Vec::with_capacity(INITIAL_FONT_STORAGE_CAPACITY),
            textures,
            fontdue_key_map: HashMap::new(),
            font_key_map: HashMap::new(),
            texture_key_map,
            sound_map: HashMap::new(),
            asset_folder_root: String::from(DEFAULT_ASSET_FOLDER),
            user_data_folder_root: String::from(DEFAULT_USER_DATA_FOLDER),
            #[cfg(feature="hotreload")]
            file_hot_reload_metadata: HashMap::new(),
        }
    }
    /// Sets the root prepended to relative asset paths.
    pub fn set_asset_folder_root(&mut self, root: String) {
        self.asset_folder_root = root;
    }
    /// Sets the root prepended to relative user-data paths.
    pub fn set_user_data_folder_root(&mut self, root: String) {
        self.user_data_folder_root = root;
    }
    pub fn get_asset_folder_root(&mut self) -> String {
        self.asset_folder_root.clone()
    }
    pub fn get_user_data_folder_root(&mut self) -> String {
        self.user_data_folder_root.clone()
    }
    /// Caches raw bytes for an asset, keyed by its full path.
    pub fn insert_asset_bytes(&mut self, relative_path: String, bytes: Vec<u8>) -> Result<(), EmeraldError> {
        let path = self.get_full_asset_path(&relative_path);
        self.bytes.insert(path, bytes);
        Ok(())
    }
    /// Returns a clone of the cached bytes for an asset path, if present.
    pub fn get_asset_bytes(&mut self, relative_path: &String) -> Option<Vec<u8>> {
        let full_path = self.get_full_asset_path(relative_path);
        self.get_bytes(full_path)
    }
    /// Reads an asset file from disk (does not consult or fill the cache).
    pub fn read_asset_file(&mut self, relative_path: &String) -> Result<Vec<u8>, EmeraldError> {
        let full_path = self.get_full_asset_path(relative_path);
        read_file(&full_path)
    }
    /// Caches raw bytes for a user-data file, keyed by its full path.
    pub fn _insert_user_bytes(&mut self, relative_path: String, bytes: Vec<u8>) -> Result<(), EmeraldError> {
        let path = self.get_full_user_data_path(&relative_path);
        self.bytes.insert(path, bytes);
        Ok(())
    }
    /// Returns a clone of the cached bytes for a user-data path, if present.
    pub fn get_user_bytes(&mut self, relative_path: &String) -> Option<Vec<u8>> {
        let full_path = self.get_full_user_data_path(relative_path);
        self.get_bytes(full_path)
    }
    /// Reads a user-data file from disk (does not consult or fill the cache).
    pub fn read_user_file(&mut self, relative_path: &String) -> Result<Vec<u8>, EmeraldError> {
        let full_path = self.get_full_user_data_path(relative_path);
        read_file(&full_path)
    }
    // Clones cached bytes for a fully qualified path, if present.
    fn get_bytes(&mut self, path: String) -> Option<Vec<u8>> {
        self.bytes.get(&path).cloned()
    }
    /// Stores a `fontdue` font and records its index under `key`.
    pub fn insert_fontdue_font(&mut self, key: FontKey, font: fontdue::Font) {
        self.fontdue_fonts.push(font);
        self.fontdue_key_map
            .insert(key, self.fontdue_fonts.len() - 1);
    }
    /// Stores a `Font` and records its index under `key` in `font_key_map`.
    pub fn insert_font(
        &mut self,
        _ctx: &mut Context,
        key: FontKey,
        font: Font,
    ) -> Result<(), EmeraldError> {
        self.fonts.push(font);
        self.font_key_map.insert(key, self.fonts.len() - 1);
        Ok(())
    }
    /// Inserts `texture` under `key`, first removing (and freeing) any
    /// texture already registered under that key.
    pub fn insert_texture(&mut self, key: TextureKey, texture: Texture) {
        if self.get_texture(&key).is_some() {
            self.remove_texture(key.clone());
        }
        self.textures.push(texture);
        self.texture_key_map
            .insert(key.clone(), self.textures.len() - 1);
        #[cfg(feature="hotreload")]
        crate::assets::hotreload::on_insert_texture(self, self.get_full_asset_path(&key.get_name()))
    }
    pub fn get_full_asset_path(&self, path: &String) -> String {
        // If it already contains the correct directory then just return it
        if path.contains(&self.asset_folder_root) {
            return path.clone();
        }
        let mut full_path = self.asset_folder_root.clone();
        full_path.push_str(path);
        full_path
    }
    pub fn get_full_user_data_path(&self, path: &String) -> String {
        // If it already contains the correct directory then just return it
        if path.contains(&self.user_data_folder_root) {
            return path.clone();
        }
        let mut full_path = self.user_data_folder_root.clone();
        full_path.push_str(path);
        full_path
    }
    pub fn get_fontdue_font(&self, key: &FontKey) -> Option<&fontdue::Font> {
        let index = *self.fontdue_key_map.get(key)?;
        self.fontdue_fonts.get(index)
    }
    pub fn _get_fontdue_font_mut(&mut self, key: &FontKey) -> Option<&mut fontdue::Font> {
        let index = *self.fontdue_key_map.get(key)?;
        self.fontdue_fonts.get_mut(index)
    }
    pub fn get_font(&self, key: &FontKey) -> Option<&Font> {
        let index = *self.font_key_map.get(key)?;
        self.fonts.get(index)
    }
    /// Mutable font lookup.
    ///
    /// Bug fix: this previously looked `key` up in `fontdue_key_map` (which
    /// indexes `fontdue_fonts`) while indexing into `fonts`; `font_key_map` is
    /// the map maintained alongside `fonts` (see `insert_font` / `get_font`).
    pub fn get_font_mut(&mut self, key: &FontKey) -> Option<&mut Font> {
        let index = *self.font_key_map.get(key)?;
        self.fonts.get_mut(index)
    }
    pub fn get_texture(&self, key: &TextureKey) -> Option<&Texture> {
        let index = *self.texture_key_map.get(key)?;
        self.textures.get(index)
    }
    pub fn _get_texture_mut(&mut self, key: &TextureKey) -> Option<&mut Texture> {
        let index = *self.texture_key_map.get(key)?;
        self.textures.get_mut(index)
    }
    /// Removes the texture stored under `key`, frees its GPU resource, and
    /// returns it.
    pub fn remove_texture(&mut self, key: TextureKey) -> Option<Texture> {
        let index = *self.texture_key_map.get(&key)?;
        // Removing any texture other than the last one shifts the indices of
        // every subsequent texture, so the key map must be rebuilt.
        // (Bug fix: the previous code compared the index against
        // `textures.len()` *before* removal — never equal for a valid index —
        // so the rebuild always ran, even for the last texture.)
        let reset_map = index + 1 != self.textures.len();
        self.texture_key_map.remove(&key);
        let texture = self.textures.remove(index);
        texture.inner.delete();
        if reset_map {
            self.update_texture_key_map();
        }
        Some(texture)
    }
    /// Rebuilds `texture_key_map` from the current ordering of `textures`.
    #[inline]
    pub fn update_texture_key_map(&mut self) {
        self.texture_key_map = self
            .textures
            .iter()
            .enumerate()
            .map(|(index, texture)| (texture.key.clone(), index))
            .collect();
    }
    /// Re-uploads a font's CPU-side image into its backing texture.
    #[inline]
    pub fn update_font_texture(&mut self, mut ctx: &mut Context, key: &FontKey) {
        if let Some(index) = self.font_key_map.get(key) {
            if let Some(font) = self.fonts.get_mut(*index) {
                if let Some(index) = self.texture_key_map.get(&font.font_texture_key) {
                    if let Some(font_texture) = self.textures.get_mut(*index) {
                        font_texture.update(&mut ctx, &font.font_image);
                    }
                }
            }
        }
    }
    #[inline]
    pub fn contains_sound(&self, key: &SoundKey) -> bool {
        self.sound_map.contains_key(key)
    }
    #[inline]
    pub fn insert_sound(&mut self, key: SoundKey, sound: Sound) {
        self.sound_map.insert(key, sound);
    }
}
#[cfg(target_arch = "wasm32")]
fn read_file(path: &str) -> Result<Vec<u8>, EmeraldError> {
    // wasm builds have no direct filesystem access, so this always fails;
    // presumably bytes are supplied up front via `insert_asset_bytes` instead
    // — confirm against the wasm asset-loading path.
    Err(EmeraldError::new(format!(
        "Unable to get bytes for {}",
        path
    )))
}
/// Reads an asset bundled in the Android APK via `sapp_android`.
///
/// Returns the raw bytes of the asset, or an error if the asset manager could
/// not locate it.
///
/// Based on https://github.com/not-fl3/miniquad/blob/4be5328760ff356494caf59cc853bcb395bce5d2/src/fs.rs#L38-L53
#[cfg(target_os = "android")]
fn read_file(path: &str) -> Result<Vec<u8>, EmeraldError> {
    let filename = std::ffi::CString::new(path).unwrap();
    let mut data: sapp_android::android_asset = unsafe { std::mem::zeroed() };
    unsafe { sapp_android::sapp_load_asset(filename.as_ptr(), &mut data as _) };
    // Idiom fix: `!x.is_null()` instead of `x.is_null() == false`.
    if !data.content.is_null() {
        // SAFETY: a non-null `content` indicates the load succeeded; assumes
        // `content` points at `content_length` readable bytes — TODO confirm
        // against the `sapp_android` API contract.
        let slice =
            unsafe { std::slice::from_raw_parts(data.content, data.content_length as _) };
        // Element-wise cast: the pointee type of `content` may be a signed
        // `c_char`, so convert each element into the target byte type.
        let response = slice.iter().map(|c| *c as _).collect::<Vec<_>>();
        Ok(response)
    } else {
        Err(EmeraldError::new(format!("Unable to load asset `{}`", path)))
    }
}
/// Reads a file, resolved relative to the current working directory, fully
/// into memory.
///
/// Returns the file contents, or an error if the working directory cannot be
/// determined or the file cannot be opened/read.
#[cfg(not(any(target_arch = "wasm32", target_os = "android")))]
fn read_file(path: &str) -> Result<Vec<u8>, EmeraldError> {
    use std::fs::File;
    use std::io::Read;
    let current_dir = std::env::current_dir()?;
    let path = current_dir.join(path);
    // Open the `PathBuf` directly instead of round-tripping through `String`;
    // this avoids an allocation and keeps non-UTF-8 path components working.
    let mut contents = vec![];
    let mut file = File::open(path)?;
    file.read_to_end(&mut contents)?;
    Ok(contents)
}
| 30.693215 | 115 | 0.614128 |
29b1dcf7196d2101d26dcf42b5f8598befd9a3d0 | 47,433 | //! Management of outgoing connections.
//!
//! This module implements outgoing connection management, decoupled from the underlying transport
//! or any higher-level level parts. It encapsulates the reconnection and blocklisting logic on the
//! `SocketAddr` level.
//!
//! # Basic structure
//!
//! Core of this module is the `OutgoingManager`, which supports the following functionality:
//!
//! * Handed a `SocketAddr`s via the `learn_addr` function, it will permanently maintain a
//! connection to the given address, only giving up if retry thresholds are exceeded, after which
//! it will be forgotten.
//! * `block_addr` and `redeem_addr` can be used to maintain a `SocketAddr`-keyed block list.
//! * `OutgoingManager` maintains an internal routing table. The `get_route` function can be used to
//! retrieve a "route" (typically a `sync::channel` accepting network messages) to a remote peer
//! by `NodeId`.
//!
//! # Requirements
//!
//! `OutgoingManager` is decoupled from the underlying protocol, all of its interactions are
//! performed through [`DialRequest`] and [`DialOutcome`]s. This frees the `OutgoingManager` from
//! having to worry about protocol specifics.
//!
//! Three conditions not expressed in code must be fulfilled for the `OutgoingManager` to function:
//!
//! * The `Dialer` is expected to produce `DialOutcomes` for every dial [`DialRequest::Dial`]
//! eventually. These must be forwarded to the `OutgoingManager` via the `handle_dial_outcome`
//! function.
//! * The `perform_housekeeping` method must be called periodically to give the the
//! `OutgoingManager` a chance to initiate reconnections and collect garbage.
//! * When a connection is dropped, the connection manager must be notified via
//! `handle_connection_drop`.
//!
//! # Lifecycle
//!
//! The following chart illustrates the lifecycle of an outgoing connection.
//!
//! ```text
//! learn
//! ┌────────────── unknown/forgotten
//! │ ┌───────────► (implicit state)
//! │ │
//! │ │ exceed fail │
//! │ │ limit │ block
//! │ │ │
//! │ │ │
//! │ │ ▼
//! ┌─────────┐ fail, │ │ ┌─────────┐
//! │ │ sweep │ │ block │ │
//! │ Waiting │◄───────┐ │ │ ┌─────►│ Blocked │◄──────────┐
//! ┌───┤ │ │ │ │ │ │ │ │
//! │ └────┬────┘ │ │ │ │ └────┬────┘ │
//! │ block │ │ │ │ │ │ │
//! │ │ timeout │ ▼ │ │ │ redeem, │
//! │ │ ┌────┴─────┴───┐ │ block timeout │
//! │ │ │ │ │ │
//! │ └───────►│ Connecting │◄──────┘ │
//! │ │ │ │
//! │ └─────┬────┬───┘ │
//! │ │ ▲ │ │
//! │ success │ │ │ detect │
//! │ │ │ │ ┌──────────┐ │
//! │ ┌───────────┐ │ │ │ │ │ │
//! │ │ │◄────────┘ │ │ │ Loopback │ │
//! │ │ Connected │ │ └─────►│ │ │
//! │ │ │ dropped │ └──────────┘ │
//! │ └─────┬─────┴───────────┘ │
//! │ │ │
//! │ │ block │
//! └───────┴─────────────────────────────────────────────────┘
//! ```
//!
//! # Timeouts/safety
//!
//! The `sweep` transition for connections usually does not happen during normal operations. Three
//! causes are typical for it:
//!
//! * A configured TCP timeout above [`OutgoingConfig::sweep_timeout`].
//! * Very slow responses from remote peers (similar to a Slowloris-attack)
//! * Faulty handling by the driver of the [`OutgoingManager`], i.e. the outside component.
//!
//! Should a dial attempt exceed a certain timeout, it is considered failed and put into the waiting
//! state again.
//!
//! If a conflict (multiple successful dial results) occurs, the more recent connection takes
//! precedence over the previous one. This prevents problems when a notification of a terminated
//! connection is overtaken by the new connection announcement.
// Clippy has a lot of false positives due to `span.clone()`-closures.
#![allow(clippy::redundant_clone)]
use std::{
collections::{hash_map::Entry, HashMap},
error::Error,
fmt::{self, Debug, Display, Formatter},
mem,
net::SocketAddr,
time::{Duration, Instant},
};
use datasize::DataSize;
use tracing::{debug, error_span, field::Empty, info, trace, warn, Span};
use super::{display_error, NodeId};
/// An outgoing connection/address in various states.
///
/// `H` is the handle type used to send data down an established connection;
/// `E` is the error type produced by failed dialing attempts.
#[derive(DataSize, Debug)]
pub struct Outgoing<H, E>
where
    H: DataSize,
    E: DataSize,
{
    /// Whether or not the address is unforgettable, see `learn_addr` for details.
    ///
    /// Unforgettable addresses are never evicted after exceeding the retry
    /// limit; their failure counter is reset instead (see `perform_housekeeping`).
    is_unforgettable: bool,
    /// The current state the connection/address is in.
    state: OutgoingState<H, E>,
}
/// Active state for a connection/address.
#[derive(DataSize, Debug)]
pub enum OutgoingState<H, E>
where
    H: DataSize,
    E: DataSize,
{
    /// The outgoing address has been known for the first time and we are currently connecting.
    Connecting {
        /// Number of attempts that failed, so far.
        failures_so_far: u8,
        /// Time when the connection attempt was instantiated.
        since: Instant,
    },
    /// The connection has failed at least one connection attempt and is waiting for a retry.
    Waiting {
        /// Number of attempts that failed, so far.
        failures_so_far: u8,
        /// The most recent connection error.
        ///
        /// If not given, the connection was put into a `Waiting` state due to a sweep timeout.
        error: Option<E>,
        /// The precise moment when the last connection attempt failed.
        last_failure: Instant,
    },
    /// An established outgoing connection.
    Connected {
        /// The peers remote ID.
        peer_id: NodeId,
        /// Handle to a communication channel that can be used to send data to the peer.
        ///
        /// Can be a channel to decouple sending, or even a direct connection handle.
        handle: H,
    },
    /// The address was blocked and will not be retried.
    ///
    /// The block is lifted by `perform_housekeeping` once `unblock_after` has elapsed.
    Blocked { since: Instant },
    /// The address is owned by ourselves and will not be tried again.
    Loopback,
}
impl<H, E> Display for OutgoingState<H, E>
where
    H: DataSize,
    E: DataSize,
{
    /// Renders a short human-readable tag for the state; the connecting and
    /// waiting variants include their failure count in parentheses.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            OutgoingState::Connecting { failures_so_far, .. } => {
                write!(f, "connecting({})", failures_so_far)
            }
            OutgoingState::Waiting { failures_so_far, .. } => {
                write!(f, "waiting({})", failures_so_far)
            }
            OutgoingState::Connected { .. } => f.write_str("connected"),
            OutgoingState::Blocked { .. } => f.write_str("blocked"),
            OutgoingState::Loopback => f.write_str("loopback"),
        }
    }
}
/// The result of dialing `SocketAddr`.
///
/// Produced by the dialer in response to every `DialRequest::Dial` and fed
/// back into the manager via `handle_dial_outcome`.
#[derive(Debug)]
pub enum DialOutcome<H, E> {
    /// A connection was successfully established.
    Successful {
        /// The address dialed.
        addr: SocketAddr,
        /// A handle to send data down the connection.
        handle: H,
        /// The remote peer's authenticated node ID.
        node_id: NodeId,
    },
    /// The connection attempt failed.
    Failed {
        /// The address dialed.
        addr: SocketAddr,
        /// The error encountered while dialing.
        error: E,
        /// The moment the connection attempt failed.
        when: Instant,
    },
    /// The connection was aborted, because the remote peer turned out to be a loopback.
    Loopback {
        /// The address used to connect.
        addr: SocketAddr,
    },
}
impl<H, E> DialOutcome<H, E> {
    /// Retrieves the socket address from the `DialOutcome`.
    ///
    /// Every variant carries the address that was dialed, so this is total.
    fn addr(&self) -> SocketAddr {
        match self {
            DialOutcome::Successful { addr, .. }
            | DialOutcome::Failed { addr, .. }
            | DialOutcome::Loopback { addr, .. } => *addr,
        }
    }
}
/// A request made for dialing.
#[derive(Clone, Debug)]
#[must_use]
pub(crate) enum DialRequest<H> {
    /// Attempt to connect to the outgoing socket address.
    ///
    /// For every time this request is emitted, there must be a corresponding call to
    /// `handle_dial_outcome` eventually.
    ///
    /// Any logging of connection issues should be done in the context of `span` for better log
    /// output.
    Dial {
        /// The address to dial.
        addr: SocketAddr,
        /// Logging span covering the whole dial attempt.
        span: Span,
    },
    /// Disconnects a potentially existing connection.
    ///
    /// Used when a peer has been blocked or should be disconnected for other reasons.
    Disconnect {
        /// Handle of the connection to sever.
        handle: H,
        /// Logging span covering the disconnect.
        span: Span,
    },
}
impl<H> Display for DialRequest<H>
where
    H: Display,
{
    /// Formats the request as `dial: <addr>` or `disconnect: <handle>`.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            DialRequest::Dial { addr, .. } => write!(f, "dial: {}", addr),
            DialRequest::Disconnect { handle, .. } => write!(f, "disconnect: {}", handle),
        }
    }
}
#[derive(DataSize, Debug)]
/// Connection settings for the outgoing connection manager.
pub struct OutgoingConfig {
    /// The maximum number of attempts before giving up and forgetting an address, if permitted.
    ///
    /// Unforgettable addresses have their counter reset instead of being forgotten.
    pub(crate) retry_attempts: u8,
    /// The basic time slot for exponential backoff when reconnecting.
    ///
    /// The wait after `n` failures is `2^n * base_timeout` (see `calc_backoff`).
    pub(crate) base_timeout: Duration,
    /// Time until an outgoing address is unblocked.
    pub(crate) unblock_after: Duration,
    /// Safety timeout, after which a connection is no longer expected to finish dialing.
    pub(crate) sweep_timeout: Duration,
}
impl OutgoingConfig {
/// Calculates the backoff time.
///
/// `failed_attempts` (n) is the number of previous attempts *before* the current failure (thus
/// starting at 0). The backoff time will be double for each attempt.
fn calc_backoff(&self, failed_attempts: u8) -> Duration {
2u32.pow(failed_attempts as u32) * self.base_timeout
}
}
/// Manager of outbound connections.
///
/// See the module documentation for usage suggestions.
#[derive(DataSize, Debug)]
pub struct OutgoingManager<H, E>
where
    H: DataSize,
    E: DataSize,
{
    /// Outgoing connections subsystem configuration.
    config: OutgoingConfig,
    /// Mapping of address to their current connection state.
    outgoing: HashMap<SocketAddr, Outgoing<H, E>>,
    /// Routing table.
    ///
    /// Contains a mapping from node IDs to connected socket addresses. A missing entry means that
    /// the destination is not connected.
    ///
    /// Kept in sync with the `Connected` entries of `outgoing` by
    /// `change_outgoing_state`.
    routes: HashMap<NodeId, SocketAddr>,
}
impl<H, E> OutgoingManager<H, E>
where
H: DataSize,
E: DataSize,
{
/// Creates a new outgoing manager.
pub(crate) fn new(config: OutgoingConfig) -> Self {
Self {
config,
outgoing: Default::default(),
routes: Default::default(),
}
}
}
/// Creates a logging span for a specific connection.
#[inline]
fn make_span<H, E>(addr: SocketAddr, outgoing: Option<&Outgoing<H, E>>) -> Span
where
H: DataSize,
E: DataSize,
{
// Note: The jury is still out on whether we want to create a single span per connection and
// cache it, or create a new one (with the same connection ID) each time this is called. The
// advantage of the former is external tools have it easier correlating all related
// information, while the drawback is not being able to change the parent span link, which
// might be awkward.
if let Some(outgoing) = outgoing {
match outgoing.state {
OutgoingState::Connected { peer_id, .. } => {
error_span!("outgoing", %addr, state=%outgoing.state, %peer_id, validator_id=Empty)
}
_ => {
error_span!("outgoing", %addr, state=%outgoing.state, peer_id=Empty, validator_id=Empty)
}
}
} else {
error_span!("outgoing", %addr, state = "-")
}
}
impl<H, E> OutgoingManager<H, E>
where
    H: DataSize + Clone,
    E: DataSize + Error,
{
    /// Changes the state of an outgoing connection.
    ///
    /// Will trigger an update of the routing table if necessary. Does not emit any other
    /// side-effects.
    ///
    /// Returns a mutable reference to the new (or updated) outgoing entry.
    fn change_outgoing_state(
        &mut self,
        addr: SocketAddr,
        mut new_state: OutgoingState<H, E>,
    ) -> &mut Outgoing<H, E> {
        let (prev_state, new_outgoing) = match self.outgoing.entry(addr) {
            Entry::Vacant(vacant) => {
                let inserted = vacant.insert(Outgoing {
                    state: new_state,
                    is_unforgettable: false,
                });
                (None, inserted)
            }
            Entry::Occupied(occupied) => {
                let prev = occupied.into_mut();
                mem::swap(&mut prev.state, &mut new_state);
                // `new_state` and `prev.state` are swapped now.
                (Some(new_state), prev)
            }
        };
        // Update the routing table.
        match (&prev_state, &new_outgoing.state) {
            (Some(OutgoingState::Connected { .. }), OutgoingState::Connected { .. }) => {
                trace!("route unchanged, already connected");
            }
            // Dropping from connected to any other state requires clearing the route.
            (Some(OutgoingState::Connected { peer_id, .. }), _) => {
                debug!(%peer_id, "route removed");
                self.routes.remove(peer_id);
            }
            // Otherwise we have established a new route.
            (_, OutgoingState::Connected { peer_id, .. }) => {
                debug!(%peer_id, "route added");
                self.routes.insert(*peer_id, addr);
            }
            _ => {
                trace!("route unchanged");
            }
        }
        new_outgoing
    }
    /// Retrieves the address by peer.
    ///
    /// Returns `None` if there is no established route to `peer_id`.
    pub(crate) fn get_addr(&self, peer_id: NodeId) -> Option<SocketAddr> {
        self.routes.get(&peer_id).copied()
    }
    /// Retrieves a handle to a peer.
    ///
    /// Primary function to send data to peers; clients retrieve a handle to it which can then
    /// be used to send data.
    pub(crate) fn get_route(&self, peer_id: NodeId) -> Option<&H> {
        let outgoing = self.outgoing.get(self.routes.get(&peer_id)?)?;
        if let OutgoingState::Connected { ref handle, .. } = outgoing.state {
            Some(handle)
        } else {
            None
        }
    }
    /// Iterates over all connected peer IDs.
    #[allow(clippy::needless_lifetimes)]
    pub(crate) fn connected_peers<'a>(&'a self) -> impl Iterator<Item = NodeId> + 'a {
        self.routes.keys().cloned()
    }
    /// Notify about a potentially new address that has been discovered.
    ///
    /// Immediately triggers the connection process to said address if it was not known before.
    ///
    /// A connection marked `unforgettable` will never be evicted but reset instead when it exceeds
    /// the retry limit.
    ///
    /// Returns a dial request if a new connection attempt was initiated.
    pub(crate) fn learn_addr(
        &mut self,
        addr: SocketAddr,
        unforgettable: bool,
        now: Instant,
    ) -> Option<DialRequest<H>> {
        let span = make_span(addr, self.outgoing.get(&addr));
        span.clone()
            .in_scope(move || match self.outgoing.entry(addr) {
                Entry::Occupied(_) => {
                    debug!("ignoring already known address");
                    None
                }
                Entry::Vacant(_vacant) => {
                    info!("connecting to newly learned address");
                    let outgoing = self.change_outgoing_state(
                        addr,
                        OutgoingState::Connecting {
                            failures_so_far: 0,
                            since: now,
                        },
                    );
                    if outgoing.is_unforgettable != unforgettable {
                        outgoing.is_unforgettable = unforgettable;
                        debug!(unforgettable, "marked");
                    }
                    Some(DialRequest::Dial { addr, span })
                }
            })
    }
    /// Blocks an address.
    ///
    /// Causes any current connection to the address to be terminated and future ones prohibited.
    ///
    /// Returns a disconnect request if there was an active connection to the address.
    pub(crate) fn block_addr(&mut self, addr: SocketAddr, now: Instant) -> Option<DialRequest<H>> {
        let span = make_span(addr, self.outgoing.get(&addr));
        span.clone()
            .in_scope(move || match self.outgoing.entry(addr) {
                Entry::Vacant(_vacant) => {
                    info!("unknown address blocked");
                    self.change_outgoing_state(addr, OutgoingState::Blocked { since: now });
                    None
                }
                // TODO: Check what happens on close on our end, i.e. can we distinguish in logs
                // between a closed connection on our end vs one that failed?
                Entry::Occupied(occupied) => match occupied.get().state {
                    OutgoingState::Blocked { .. } => {
                        debug!("address already blocked");
                        None
                    }
                    OutgoingState::Loopback => {
                        warn!("loopback address block ignored");
                        None
                    }
                    OutgoingState::Connected { ref handle, .. } => {
                        info!("connected address blocked, disconnecting");
                        let handle = handle.clone();
                        self.change_outgoing_state(addr, OutgoingState::Blocked { since: now });
                        Some(DialRequest::Disconnect { span, handle })
                    }
                    OutgoingState::Waiting { .. } | OutgoingState::Connecting { .. } => {
                        info!("address blocked");
                        self.change_outgoing_state(addr, OutgoingState::Blocked { since: now });
                        None
                    }
                },
            })
    }
    /// Removes an address from the block list.
    ///
    /// Does nothing if the address was not blocked.
    ///
    /// Returns a dial request if the address was indeed blocked and is now being retried.
    // This function is currently not in use by `small_network` itself.
    #[allow(dead_code)]
    pub(crate) fn redeem_addr(&mut self, addr: SocketAddr, now: Instant) -> Option<DialRequest<H>> {
        let span = make_span(addr, self.outgoing.get(&addr));
        span.clone()
            .in_scope(move || match self.outgoing.entry(addr) {
                Entry::Vacant(_) => {
                    debug!("unknown address redeemed");
                    None
                }
                Entry::Occupied(occupied) => match occupied.get().state {
                    OutgoingState::Blocked { .. } => {
                        self.change_outgoing_state(
                            addr,
                            OutgoingState::Connecting {
                                failures_so_far: 0,
                                since: now,
                            },
                        );
                        Some(DialRequest::Dial { addr, span })
                    }
                    _ => {
                        debug!("address redemption ignored, not blocked");
                        None
                    }
                },
            })
    }
    /// Performs housekeeping like reconnection or unblocking peers.
    ///
    /// This function must periodically be called. A good interval is every second.
    ///
    /// Returns dial requests for every address that is due for a (re)connection attempt.
    pub(super) fn perform_housekeeping(&mut self, now: Instant) -> Vec<DialRequest<H>> {
        let mut to_forget = Vec::new();
        let mut to_fail = Vec::new();
        let mut to_reconnect = Vec::new();
        for (&addr, outgoing) in self.outgoing.iter() {
            let span = make_span(addr, Some(outgoing));
            span.in_scope(|| match outgoing.state {
                // Decide whether to attempt reconnecting a failed-waiting address.
                OutgoingState::Waiting {
                    failures_so_far,
                    last_failure,
                    ..
                } => {
                    if failures_so_far > self.config.retry_attempts {
                        if outgoing.is_unforgettable {
                            // Unforgettable addresses simply have their timer reset.
                            info!("unforgettable address reset");
                            to_reconnect.push((addr, 0));
                        } else {
                            // Address had too many attempts at reconnection, we will forget
                            // it after exiting this closure.
                            to_forget.push(addr);
                            info!("address forgotten");
                        }
                    } else {
                        // The address has not exceeded the limit, so check if it is due.
                        let due = last_failure + self.config.calc_backoff(failures_so_far);
                        if now >= due {
                            debug!(attempts = failures_so_far, "address reconnecting");
                            to_reconnect.push((addr, failures_so_far));
                        }
                    }
                }
                OutgoingState::Blocked { since } => {
                    if now >= since + self.config.unblock_after {
                        info!("address unblocked");
                        to_reconnect.push((addr, 0));
                    }
                }
                OutgoingState::Connecting {
                    since,
                    failures_so_far,
                } => {
                    let timeout = since + self.config.sweep_timeout;
                    if now >= timeout {
                        // The outer component has not called us with a `DialOutcome` in a
                        // reasonable amount of time. This should happen very rarely, ideally
                        // never.
                        warn!("address timed out connecting, was swept");
                        // Count the timeout as a failure against the connection.
                        to_fail.push((addr, failures_so_far + 1));
                    }
                }
                OutgoingState::Connected { .. } | OutgoingState::Loopback => {
                    // Entry is ignored. Not outputting any `trace` because this is log spam even at
                    // the `trace` level.
                }
            });
        }
        // Remove all addresses marked for forgetting.
        to_forget.into_iter().for_each(|addr| {
            self.outgoing.remove(&addr);
        });
        // Fail connections that are taking way too long to connect.
        to_fail.into_iter().for_each(|(addr, failures_so_far)| {
            let span = make_span(addr, self.outgoing.get(&addr));
            span.in_scope(|| {
                self.change_outgoing_state(
                    addr,
                    OutgoingState::Waiting {
                        failures_so_far,
                        error: None,
                        last_failure: now,
                    },
                )
            });
        });
        // Reconnect all others.
        to_reconnect
            .into_iter()
            .map(|(addr, failures_so_far)| {
                let span = make_span(addr, self.outgoing.get(&addr));
                span.clone().in_scope(|| {
                    self.change_outgoing_state(
                        addr,
                        OutgoingState::Connecting {
                            failures_so_far,
                            since: now,
                        },
                    )
                });
                DialRequest::Dial { addr, span }
            })
            .collect()
    }
    /// Handles the outcome of a dialing attempt.
    ///
    /// Note that reconnects will earliest happen on the next `perform_housekeeping` call.
    ///
    /// May return a follow-up request, e.g. an immediate disconnect when a blocked address
    /// managed to connect.
    pub(crate) fn handle_dial_outcome(
        &mut self,
        dial_outcome: DialOutcome<H, E>,
    ) -> Option<DialRequest<H>> {
        let addr = dial_outcome.addr();
        let span = make_span(addr, self.outgoing.get(&addr));
        span.clone().in_scope(move || match dial_outcome {
            DialOutcome::Successful {
                addr,
                handle,
                node_id,
                ..
            } => {
                info!("established outgoing connection");
                if let Some(Outgoing{
                    state: OutgoingState::Blocked { .. }, ..
                }) = self.outgoing.get(&addr) {
                    // If we connected to a blocked address, do not go into connected, but stay
                    // blocked instead.
                    Some(DialRequest::Disconnect{
                        handle, span
                    })
                } else {
                    // Otherwise, just record the connected state.
                    self.change_outgoing_state(
                        addr,
                        OutgoingState::Connected {
                            peer_id: node_id,
                            handle,
                        },
                    );
                    None
                }
            }
            DialOutcome::Failed { addr, error, when } => {
                info!(err = display_error(&error), "outgoing connection failed");
                let failures_so_far = if let Some(outgoing) = self.outgoing.get(&addr) {
                    match outgoing.state {
                        OutgoingState::Connecting { failures_so_far,.. } => {
                            failures_so_far + 1
                        }
                        OutgoingState::Blocked { .. } => {
                            debug!("failed dial outcome after block ignored");
                            1
                        }
                        OutgoingState::Waiting { .. } |
                        OutgoingState::Connected { .. } |
                        OutgoingState::Loopback => {
                            warn!(
                                "processing dial outcome on a connection that was not marked as connecting or blocked"
                            );
                            1
                        }
                    }
                } else {
                    warn!("processing dial outcome non-existent connection");
                    1
                };
                self.change_outgoing_state(
                    addr,
                    OutgoingState::Waiting {
                        failures_so_far,
                        error: Some(error),
                        last_failure: when,
                    },
                );
                None
            }
            DialOutcome::Loopback { addr } => {
                info!("found loopback address");
                self.change_outgoing_state(addr, OutgoingState::Loopback);
                None
            }
        })
    }
    /// Notifies the connection manager about a dropped connection.
    ///
    /// This will usually result in an immediate reconnection.
    pub(crate) fn handle_connection_drop(
        &mut self,
        addr: SocketAddr,
        now: Instant,
    ) -> Option<DialRequest<H>> {
        let span = make_span(addr, self.outgoing.get(&addr));
        span.clone().in_scope(move || {
            if let Some(outgoing) = self.outgoing.get(&addr) {
                match outgoing.state {
                    OutgoingState::Waiting { .. }
                    | OutgoingState::Loopback
                    | OutgoingState::Connecting { .. } => {
                        // We should, under normal circumstances, not receive drop notifications for
                        // any of these. Connection failures are handled by the dialer.
                        warn!("unexpected drop notification");
                        None
                    }
                    OutgoingState::Connected { .. } => {
                        // Drop the handle, immediately initiate a reconnection.
                        self.change_outgoing_state(
                            addr,
                            OutgoingState::Connecting {
                                failures_so_far: 0,
                                since: now,
                            },
                        );
                        Some(DialRequest::Dial { addr, span })
                    }
                    OutgoingState::Blocked { .. } => {
                        // Blocked addresses ignore connection drops.
                        debug!("received drop notification for blocked connection");
                        None
                    }
                }
            } else {
                warn!("received connection drop notification for unknown connection");
                None
            }
        })
    }
}
#[cfg(test)]
mod tests {
use std::{net::SocketAddr, time::Duration};
use datasize::DataSize;
use thiserror::Error;
use super::{DialOutcome, DialRequest, NodeId, OutgoingConfig, OutgoingManager};
use crate::testing::{init_logging, test_clock::TestClock};
    /// Error for test dialer.
    ///
    /// Tracks a configurable id for the error.
    ///
    /// The `id` is echoed in the `Display` output, so individual failures can
    /// be told apart in logs.
    #[derive(DataSize, Debug, Error)]
    #[error("test dialer error({})", id)]
    struct TestDialerError {
        id: u32,
    }
    /// Setup an outgoing configuration for testing.
    ///
    /// With `base_timeout` of one second and three retry attempts, retries are
    /// due 2, 4 and 8 seconds after the respective failure (see
    /// `OutgoingConfig::calc_backoff`).
    fn test_config() -> OutgoingConfig {
        OutgoingConfig {
            retry_attempts: 3,
            base_timeout: Duration::from_secs(1),
            unblock_after: Duration::from_secs(60),
            sweep_timeout: Duration::from_secs(45),
        }
    }
/// Helper function that checks if a given dial request actually dials the expected address.
fn dials<'a, H, T>(expected: SocketAddr, requests: T) -> bool
where
T: IntoIterator<Item = &'a DialRequest<H>> + 'a,
H: 'a,
{
for req in requests.into_iter() {
if let DialRequest::Dial { addr, .. } = req {
if *addr == expected {
return true;
}
}
}
false
}
/// Helper function that checks if a given dial request actually disconnects the expected
/// address.
fn disconnects<'a, H, T>(expected: H, requests: T) -> bool
where
T: IntoIterator<Item = &'a DialRequest<H>> + 'a,
H: 'a + PartialEq,
{
for req in requests.into_iter() {
if let DialRequest::Disconnect { handle, .. } = req {
if *handle == expected {
return true;
}
}
}
false
}
#[test]
fn successful_lifecycle() {
init_logging();
let mut rng = crate::new_rng();
let mut clock = TestClock::new();
let addr_a: SocketAddr = "1.2.3.4:1234".parse().unwrap();
let id_a = NodeId::random_tls(&mut rng);
let mut manager = OutgoingManager::<u32, TestDialerError>::new(test_config());
// We begin by learning a single, regular address, triggering a dial request.
assert!(dials(
addr_a,
&manager.learn_addr(addr_a, false, clock.now())
));
// Our first connection attempt fails. The connection should now be in waiting state, but
// not reconnect, since the minimum delay is 2 seconds (2*base_timeout).
assert!(manager
.handle_dial_outcome(DialOutcome::Failed {
addr: addr_a,
error: TestDialerError { id: 1 },
when: clock.now(),
},)
.is_none());
// Performing housekeeping multiple times should not make a difference.
assert!(manager.perform_housekeeping(clock.now()).is_empty());
assert!(manager.perform_housekeeping(clock.now()).is_empty());
assert!(manager.perform_housekeeping(clock.now()).is_empty());
assert!(manager.perform_housekeeping(clock.now()).is_empty());
// Advancing the clock will trigger a reconnection on the next housekeeping.
clock.advance_time(2_000);
assert!(dials(addr_a, &manager.perform_housekeeping(clock.now())));
// This time the connection succeeds.
assert!(manager
.handle_dial_outcome(DialOutcome::Successful {
addr: addr_a,
handle: 99,
node_id: id_a,
},)
.is_none());
// The routing table should have been updated and should return the handle.
assert_eq!(manager.get_route(id_a), Some(&99));
assert_eq!(manager.get_addr(id_a), Some(addr_a));
// Time passes, and our connection drops. Reconnecting should be immediate.
assert!(manager.perform_housekeeping(clock.now()).is_empty());
clock.advance_time(20_000);
assert!(dials(
addr_a,
&manager.handle_connection_drop(addr_a, clock.now())
));
// The route should have been cleared.
assert!(manager.get_route(id_a).is_none());
assert!(manager.get_addr(id_a).is_none());
// Reconnection is already in progress, so we do not expect another request on housekeeping.
assert!(manager.perform_housekeeping(clock.now()).is_empty());
}
#[test]
fn connections_forgotten_after_too_many_tries() {
init_logging();
let mut clock = TestClock::new();
let addr_a: SocketAddr = "1.2.3.4:1234".parse().unwrap();
// Address `addr_b` will be a known address.
let addr_b: SocketAddr = "5.6.7.8:5678".parse().unwrap();
let mut manager = OutgoingManager::<u32, TestDialerError>::new(test_config());
// First, attempt to connect. Tests are set to 3 retries after 2, 4 and 8 seconds.
assert!(dials(
addr_a,
&manager.learn_addr(addr_a, false, clock.now())
));
assert!(dials(
addr_b,
&manager.learn_addr(addr_b, true, clock.now())
));
// Fail the first connection attempts, not triggering a retry (timeout not reached yet).
assert!(manager
.handle_dial_outcome(DialOutcome::Failed {
addr: addr_a,
error: TestDialerError { id: 10 },
when: clock.now(),
},)
.is_none());
assert!(manager
.handle_dial_outcome(DialOutcome::Failed {
addr: addr_b,
error: TestDialerError { id: 11 },
when: clock.now(),
},)
.is_none());
// Learning the address again should not cause a reconnection.
assert!(manager.learn_addr(addr_a, false, clock.now()).is_none());
assert!(manager.learn_addr(addr_b, false, clock.now()).is_none());
assert!(manager.perform_housekeeping(clock.now()).is_empty());
assert!(manager.learn_addr(addr_a, false, clock.now()).is_none());
assert!(manager.learn_addr(addr_b, false, clock.now()).is_none());
// After 1.999 seconds, reconnection should still be delayed.
clock.advance_time(1_999);
assert!(manager.perform_housekeeping(clock.now()).is_empty());
// Adding 0.001 seconds finally is enough to reconnect.
clock.advance_time(1);
let requests = manager.perform_housekeeping(clock.now());
assert!(dials(addr_a, &requests));
assert!(dials(addr_b, &requests));
// Waiting for more than the reconnection delay should not be harmful or change
// anything, as we are currently connecting.
clock.advance_time(6_000);
assert!(manager.perform_housekeeping(clock.now()).is_empty());
// Fail the connection again, wait 3.999 seconds, expecting no reconnection.
assert!(manager
.handle_dial_outcome(DialOutcome::Failed {
addr: addr_a,
error: TestDialerError { id: 40 },
when: clock.now(),
},)
.is_none());
assert!(manager
.handle_dial_outcome(DialOutcome::Failed {
addr: addr_b,
error: TestDialerError { id: 41 },
when: clock.now(),
},)
.is_none());
clock.advance_time(3_999);
assert!(manager.perform_housekeeping(clock.now()).is_empty());
// Adding 0.001 seconds finally again pushes us over the threshold.
clock.advance_time(1);
let requests = manager.perform_housekeeping(clock.now());
assert!(dials(addr_a, &requests));
assert!(dials(addr_b, &requests));
// Fail the connection quickly.
clock.advance_time(25);
assert!(manager
.handle_dial_outcome(DialOutcome::Failed {
addr: addr_a,
error: TestDialerError { id: 10 },
when: clock.now(),
},)
.is_none());
assert!(manager
.handle_dial_outcome(DialOutcome::Failed {
addr: addr_b,
error: TestDialerError { id: 10 },
when: clock.now(),
},)
.is_none());
assert!(manager.perform_housekeeping(clock.now()).is_empty());
// The last attempt should happen 8 seconds after the error, not the last attempt.
clock.advance_time(7_999);
assert!(manager.perform_housekeeping(clock.now()).is_empty());
clock.advance_time(1);
let requests = manager.perform_housekeeping(clock.now());
assert!(dials(addr_a, &requests));
assert!(dials(addr_b, &requests));
// Fail the last attempt. No more reconnections should be happening.
assert!(manager
.handle_dial_outcome(DialOutcome::Failed {
addr: addr_a,
error: TestDialerError { id: 10 },
when: clock.now(),
},)
.is_none());
assert!(manager
.handle_dial_outcome(DialOutcome::Failed {
addr: addr_b,
error: TestDialerError { id: 10 },
when: clock.now(),
},)
.is_none());
// Only the unforgettable address should be reconnecting.
let requests = manager.perform_housekeeping(clock.now());
assert!(!dials(addr_a, &requests));
assert!(dials(addr_b, &requests));
// But not `addr_a`, even after a long wait.
clock.advance_time(1_000_000_000);
assert!(manager.perform_housekeeping(clock.now()).is_empty());
}
#[test]
fn blocking_works() {
init_logging();
let mut rng = crate::new_rng();
let mut clock = TestClock::new();
let addr_a: SocketAddr = "1.2.3.4:1234".parse().unwrap();
// We use `addr_b` as an unforgettable address, which does not mean it cannot be blocked!
let addr_b: SocketAddr = "5.6.7.8:5678".parse().unwrap();
let addr_c: SocketAddr = "9.0.1.2:9012".parse().unwrap();
let id_a = NodeId::random_tls(&mut rng);
let id_b = NodeId::random_tls(&mut rng);
let id_c = NodeId::random_tls(&mut rng);
let mut manager = OutgoingManager::<u32, TestDialerError>::new(test_config());
// Block `addr_a` from the start.
assert!(manager.block_addr(addr_a, clock.now()).is_none());
// Learning both `addr_a` and `addr_b` should only trigger a connection to `addr_b` now.
assert!(manager.learn_addr(addr_a, false, clock.now()).is_none());
assert!(dials(
addr_b,
&manager.learn_addr(addr_b, true, clock.now())
));
assert!(manager.perform_housekeeping(clock.now()).is_empty());
// Fifteen seconds later we succeed in connecting to `addr_b`.
clock.advance_time(15_000);
assert!(manager
.handle_dial_outcome(DialOutcome::Successful {
addr: addr_b,
handle: 101,
node_id: id_b,
},)
.is_none());
assert_eq!(manager.get_route(id_b), Some(&101));
// Invariant through housekeeping.
assert!(manager.perform_housekeeping(clock.now()).is_empty());
assert_eq!(manager.get_route(id_b), Some(&101));
// Another fifteen seconds later, we block `addr_b`.
clock.advance_time(15_000);
assert!(disconnects(101, &manager.block_addr(addr_b, clock.now())));
// `addr_c` will be blocked during the connection phase.
assert!(dials(
addr_c,
&manager.learn_addr(addr_c, false, clock.now())
));
assert!(manager.block_addr(addr_c, clock.now()).is_none());
// We are still expect to provide a dial outcome, but afterwards, there should be no
// route to C and an immediate disconnection should be queued.
assert!(disconnects(
42,
&manager.handle_dial_outcome(DialOutcome::Successful {
addr: addr_c,
handle: 42,
node_id: id_c,
},)
));
assert!(manager.perform_housekeeping(clock.now()).is_empty());
assert!(manager.get_route(id_c).is_none());
// At this point, we have blocked all three addresses. 30 seconds later, the first one is
// unblocked due to the block timing out.
clock.advance_time(30_000);
assert!(dials(addr_a, &manager.perform_housekeeping(clock.now())));
// Fifteen seconds later, B and C are still blocked, but we redeem B early.
clock.advance_time(15_000);
assert!(manager.perform_housekeeping(clock.now()).is_empty());
assert!(dials(addr_b, &manager.redeem_addr(addr_b, clock.now())));
// Succeed both connections, and ensure we have routes to both.
assert!(manager
.handle_dial_outcome(DialOutcome::Successful {
addr: addr_b,
handle: 77,
node_id: id_b,
},)
.is_none());
assert!(manager
.handle_dial_outcome(DialOutcome::Successful {
addr: addr_a,
handle: 66,
node_id: id_a,
},)
.is_none());
assert_eq!(manager.get_route(id_a), Some(&66));
assert_eq!(manager.get_route(id_b), Some(&77));
}
#[test]
fn loopback_handled_correctly() {
    init_logging();

    let mut clock = TestClock::new();
    let addr: SocketAddr = "1.2.3.4:1234".parse().unwrap();
    let mut mgr = OutgoingManager::<u32, TestDialerError>::new(test_config());

    // The first time a loopback address is learned, it is dialed like any
    // other address.
    let first_learn = mgr.learn_addr(addr, false, clock.now());
    assert!(dials(addr, &first_learn));

    // Reporting the dial outcome as loopback marks the address permanently;
    // no follow-up request is produced, and housekeeping stays quiet.
    let outcome = mgr.handle_dial_outcome(DialOutcome::Loopback { addr });
    assert!(outcome.is_none());
    assert!(mgr.perform_housekeeping(clock.now()).is_empty());

    // Re-learning a known loopback address must not trigger another dial.
    assert!(mgr.learn_addr(addr, false, clock.now()).is_none());

    // Blocking a loopback address is a no-op: ordinary blocks expire after a
    // while, which would resurrect the loopback connection.
    assert!(mgr.block_addr(addr, clock.now()).is_none());
    clock.advance_time(1_000_000_000);
    assert!(mgr.perform_housekeeping(clock.now()).is_empty());
}
#[test]
fn connected_peers_works() {
    init_logging();

    let mut rng = crate::new_rng();
    let clock = TestClock::new();

    let addr_a: SocketAddr = "1.2.3.4:1234".parse().unwrap();
    let addr_b: SocketAddr = "5.6.7.8:5678".parse().unwrap();
    let id_a = NodeId::random_tls(&mut rng);
    let id_b = NodeId::random_tls(&mut rng);

    let mut mgr = OutgoingManager::<u32, TestDialerError>::new(test_config());

    // Learn both addresses and report both dials as successful.
    mgr.learn_addr(addr_a, false, clock.now());
    mgr.learn_addr(addr_b, true, clock.now());
    mgr.handle_dial_outcome(DialOutcome::Successful {
        addr: addr_a,
        handle: 22,
        node_id: id_a,
    });
    mgr.handle_dial_outcome(DialOutcome::Successful {
        addr: addr_b,
        handle: 33,
        node_id: id_b,
    });

    // `connected_peers` yields peers in unspecified order, so compare sorted
    // copies of actual and expected.
    let mut actual: Vec<_> = mgr.connected_peers().collect();
    actual.sort();
    let mut expected = vec![id_a, id_b];
    expected.sort();
    assert_eq!(actual, expected);
}
#[test]
fn sweeping_works() {
    init_logging();

    let mut rng = crate::new_rng();
    let mut clock = TestClock::new();

    let addr_a: SocketAddr = "1.2.3.4:1234".parse().unwrap();
    let id_a = NodeId::random_tls(&mut rng);
    let mut mgr = OutgoingManager::<u32, TestDialerError>::new(test_config());

    // Learning the address kicks off the first dial attempt.
    let learned = mgr.learn_addr(addr_a, false, clock.now());
    assert!(dials(addr_a, &learned));

    // Enough time passes for the outstanding dial to be considered timed out;
    // the sweep itself is not expected to produce any requests yet.
    clock.advance_time(50_000);
    assert!(mgr.perform_housekeeping(clock.now()).is_empty());

    // After the first regular failure, a reconnect is scheduled 2s later.
    clock.advance_time(2_000);
    assert!(dials(addr_a, &mgr.perform_housekeeping(clock.now())));

    // One second later the second attempt (`handle: 2`) completes first and a
    // route is established through it.
    clock.advance_time(1_000);
    let second = mgr.handle_dial_outcome(DialOutcome::Successful {
        addr: addr_a,
        handle: 2,
        node_id: id_a,
    });
    assert!(second.is_none());
    assert_eq!(mgr.get_route(id_a), Some(&2));

    // Much later, the original (swept) attempt `handle: 1` also reports
    // success and is expected to take over the route (see documentation).
    clock.advance_time(30_000);
    let first = mgr.handle_dial_outcome(DialOutcome::Successful {
        addr: addr_a,
        handle: 1,
        node_id: id_a,
    });
    assert!(first.is_none());
    assert_eq!(mgr.get_route(id_a), Some(&1));
}
}
| 36.884137 | 118 | 0.528872 |
214ee9977a5e0d7ccf4dd81ec0ff0e69b544f00e | 386 |
/// Example type demonstrating the `Drop` trait: it owns a `String` payload
/// that is echoed when the value is released.
struct CustomSmartPointer {
// Owned payload; printed from `Drop::drop` so the drop order is observable.
data: String
}
impl Drop for CustomSmartPointer {
    /// Runs automatically when the value goes out of scope (or when it is
    /// handed to `std::mem::drop`), announcing which payload is released.
    fn drop(&mut self) {
        let data = &self.data;
        println!("Dropping CustomSmartPointer with data `{}`!", data);
    }
}
fn main() {
    // Create two values: `first` is released early via an explicit `drop`,
    // while `second` is released implicitly at the end of `main`, so the two
    // drop messages bracket the "created" line in a fixed order.
    let first = CustomSmartPointer {
        data: String::from("my stuff"),
    };
    let second = CustomSmartPointer {
        data: String::from("other stuff"),
    };
    println!("CustomSmartPointers created.");
    drop(first);
}
| 15.44 | 69 | 0.668394 |
6a1e05f6139c9b7721affde4f6959d1a32c879c2 | 782 | #[allow(dead_code)]
/// Decodes a LEB128/varint-encoded `u64` starting at `offset` in `buf`.
///
/// Each byte contributes its low 7 bits, least-significant group first; the
/// high bit of a byte signals that another byte follows.
///
/// NOTE: the `&Vec<u8>` parameter is kept (rather than the more idiomatic
/// `&[u8]`) to preserve the existing public signature.
///
/// # Panics
///
/// Panics if the encoded value runs past the end of `buf`.
pub fn read_varint(buf: &Vec<u8>, offset: usize) -> u64 {
    let mut res = 0u64;
    let mut shift = 0u64;
    let mut idx = offset;
    loop {
        // Bug fix: the previous version only ever looked at `buf[offset]`,
        // so any multi-byte value (first byte >= 0x80) looped forever
        // toggling the continuation bit instead of consuming further bytes.
        let byte = buf[idx];
        res |= ((byte & 0x7F) as u64) << shift;
        if byte & 0x80 == 0 {
            break;
        }
        shift += 7;
        idx += 1;
    }
    res
}
/// Encodes `v` as a LEB128/varint byte sequence: least-significant 7-bit
/// group first, with the high bit set on every byte except the last.
#[allow(dead_code)]
pub fn varint_to_vec(v: u64) -> Vec<u8> {
    let mut v = v;
    let mut res: Vec<u8> = vec![];
    loop {
        // Bug fix: the old code masked 8 bits (`v & 0xFF`) while shifting by
        // 7, corrupting multi-byte values, and its `if curr >= 0x80` check
        // was a no-op that never actually set the continuation bit.
        let mut curr = (v & 0x7F) as u8;
        v >>= 7;
        if v != 0 {
            // More 7-bit groups follow: flag continuation.
            curr |= 0x80;
        }
        res.push(curr);
        if v == 0 {
            break;
        }
    }
    res
}
| 17.772727 | 57 | 0.445013 |
fe59e9173779e5746d6c64c8221dd2633871ed9e | 855 | pub use bevy_reflect_derive::TypeUuid;
pub use bevy_utils::Uuid;
/// Statically associates a stable, unique UUID with the implementing type.
pub trait TypeUuid {
/// The UUID identifying the implementing type.
const TYPE_UUID: Uuid;
}
/// Object-safe counterpart of `TypeUuid`, usable through `dyn` references.
pub trait TypeUuidDynamic {
/// Returns the UUID of the underlying concrete type.
fn type_uuid(&self) -> Uuid;
/// Returns the full type path of the underlying concrete type.
fn type_name(&self) -> &'static str;
}
// Blanket impl: every type with a compile-time UUID automatically gets the
// object-safe dynamic accessors.
impl<T: TypeUuid> TypeUuidDynamic for T {
    /// Returns the UUID associated with `T` at compile time.
    fn type_uuid(&self) -> Uuid {
        T::TYPE_UUID
    }
    /// Returns the full path of the concrete type.
    fn type_name(&self) -> &'static str {
        std::any::type_name::<T>()
    }
}
#[cfg(test)]
mod test {
use super::*;
// Compile-time check: the `TypeUuid` derive must accept generic types that
// carry `where`-clause bounds.
#[derive(TypeUuid)]
#[uuid = "af6466c2-a9f4-11eb-bcbc-0242ac130002"]
struct TestDeriveStruct<T>
where
T: Clone,
{
_value: T,
}
// Accepting `&impl TypeUuid` proves the derive produced a `TypeUuid` impl.
fn test_impl_type_uuid(_: &impl TypeUuid) {}
#[test]
fn test_generic_type_uuid_derive() {
let test_struct = TestDeriveStruct { _value: 42 };
test_impl_type_uuid(&test_struct);
}
}
| 18.191489 | 58 | 0.603509 |
0ed8da801e90eb6e0c774f5095636d3de156e7df | 10,577 | #[doc = "Register `FRAMECONFIG` reader"]
// svd2rust-generated newtype around the generic register reader; the field
// accessors in `impl R` below attach to it, while `Deref` exposes the raw
// `bits()` plumbing of the generic reader.
pub struct R(crate::R<FRAMECONFIG_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<FRAMECONFIG_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<FRAMECONFIG_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<FRAMECONFIG_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `FRAMECONFIG` writer"]
// svd2rust-generated newtype around the generic register writer; Deref and
// DerefMut expose the raw `bits` storage used by the field-writer proxies.
pub struct W(crate::W<FRAMECONFIG_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<FRAMECONFIG_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<FRAMECONFIG_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<FRAMECONFIG_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Parity expected or not in RX frame\n\nValue on reset: 1"]
// PARITY occupies bit 0 of FRAMECONFIG (see `impl R` / `impl W` below).
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PARITY_A {
#[doc = "0: Parity is not expected in RX frames"]
NOPARITY = 0,
#[doc = "1: Parity is expected in RX frames"]
PARITY = 1,
}
impl From<PARITY_A> for bool {
#[inline(always)]
fn from(variant: PARITY_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `PARITY` reader - Parity expected or not in RX frame"]
pub struct PARITY_R(crate::FieldReader<bool, PARITY_A>);
impl PARITY_R {
pub(crate) fn new(bits: bool) -> Self {
PARITY_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PARITY_A {
match self.bits {
false => PARITY_A::NOPARITY,
true => PARITY_A::PARITY,
}
}
#[doc = "Checks if the value of the field is `NOPARITY`"]
#[inline(always)]
pub fn is_no_parity(&self) -> bool {
**self == PARITY_A::NOPARITY
}
#[doc = "Checks if the value of the field is `PARITY`"]
#[inline(always)]
pub fn is_parity(&self) -> bool {
**self == PARITY_A::PARITY
}
}
impl core::ops::Deref for PARITY_R {
type Target = crate::FieldReader<bool, PARITY_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `PARITY` writer - Parity expected or not in RX frame"]
pub struct PARITY_W<'a> {
w: &'a mut W,
}
impl<'a> PARITY_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PARITY_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Parity is not expected in RX frames"]
#[inline(always)]
pub fn no_parity(self) -> &'a mut W {
self.variant(PARITY_A::NOPARITY)
}
#[doc = "Parity is expected in RX frames"]
#[inline(always)]
pub fn parity(self) -> &'a mut W {
self.variant(PARITY_A::PARITY)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
// Read-modify-write of bit 0 only; other register bits are preserved.
self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01);
self.w
}
}
#[doc = "SoF expected or not in RX frames\n\nValue on reset: 1"]
// SOF occupies bit 2 of FRAMECONFIG (see `impl R` / `impl W` below).
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SOF_A {
#[doc = "0: Start of Frame symbol is not expected in RX frames"]
NOSOF = 0,
#[doc = "1: Start of Frame symbol is expected in RX frames"]
SOF = 1,
}
impl From<SOF_A> for bool {
#[inline(always)]
fn from(variant: SOF_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `SOF` reader - SoF expected or not in RX frames"]
pub struct SOF_R(crate::FieldReader<bool, SOF_A>);
impl SOF_R {
pub(crate) fn new(bits: bool) -> Self {
SOF_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> SOF_A {
match self.bits {
false => SOF_A::NOSOF,
true => SOF_A::SOF,
}
}
#[doc = "Checks if the value of the field is `NOSOF`"]
#[inline(always)]
pub fn is_no_so_f(&self) -> bool {
**self == SOF_A::NOSOF
}
#[doc = "Checks if the value of the field is `SOF`"]
#[inline(always)]
pub fn is_so_f(&self) -> bool {
**self == SOF_A::SOF
}
}
impl core::ops::Deref for SOF_R {
type Target = crate::FieldReader<bool, SOF_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `SOF` writer - SoF expected or not in RX frames"]
pub struct SOF_W<'a> {
w: &'a mut W,
}
impl<'a> SOF_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: SOF_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Start of Frame symbol is not expected in RX frames"]
#[inline(always)]
pub fn no_so_f(self) -> &'a mut W {
self.variant(SOF_A::NOSOF)
}
#[doc = "Start of Frame symbol is expected in RX frames"]
#[inline(always)]
pub fn so_f(self) -> &'a mut W {
self.variant(SOF_A::SOF)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
// Read-modify-write of bit 2 only; other register bits are preserved.
self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2);
self.w
}
}
#[doc = "CRC mode for incoming frames\n\nValue on reset: 1"]
// CRCMODERX occupies bit 4 of FRAMECONFIG (see `impl R` / `impl W` below).
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CRCMODERX_A {
#[doc = "0: CRC is not expected in RX frames"]
NOCRCRX = 0,
#[doc = "1: Last 16 bits in RX frame is CRC, CRC is checked and CRCSTATUS updated"]
CRC16RX = 1,
}
impl From<CRCMODERX_A> for bool {
#[inline(always)]
fn from(variant: CRCMODERX_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `CRCMODERX` reader - CRC mode for incoming frames"]
pub struct CRCMODERX_R(crate::FieldReader<bool, CRCMODERX_A>);
impl CRCMODERX_R {
pub(crate) fn new(bits: bool) -> Self {
CRCMODERX_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> CRCMODERX_A {
match self.bits {
false => CRCMODERX_A::NOCRCRX,
true => CRCMODERX_A::CRC16RX,
}
}
#[doc = "Checks if the value of the field is `NOCRCRX`"]
#[inline(always)]
pub fn is_no_crcrx(&self) -> bool {
**self == CRCMODERX_A::NOCRCRX
}
#[doc = "Checks if the value of the field is `CRC16RX`"]
#[inline(always)]
pub fn is_crc16rx(&self) -> bool {
**self == CRCMODERX_A::CRC16RX
}
}
impl core::ops::Deref for CRCMODERX_R {
type Target = crate::FieldReader<bool, CRCMODERX_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `CRCMODERX` writer - CRC mode for incoming frames"]
pub struct CRCMODERX_W<'a> {
w: &'a mut W,
}
impl<'a> CRCMODERX_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: CRCMODERX_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "CRC is not expected in RX frames"]
#[inline(always)]
pub fn no_crcrx(self) -> &'a mut W {
self.variant(CRCMODERX_A::NOCRCRX)
}
#[doc = "Last 16 bits in RX frame is CRC, CRC is checked and CRCSTATUS updated"]
#[inline(always)]
pub fn crc16rx(self) -> &'a mut W {
self.variant(CRCMODERX_A::CRC16RX)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
// Read-modify-write of bit 4 only; other register bits are preserved.
self.w.bits = (self.w.bits & !(0x01 << 4)) | ((value as u32 & 0x01) << 4);
self.w
}
}
// Typed field accessors: each reader extracts a single bit from the cached
// register value.
impl R {
#[doc = "Bit 0 - Parity expected or not in RX frame"]
#[inline(always)]
pub fn parity(&self) -> PARITY_R {
PARITY_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 2 - SoF expected or not in RX frames"]
#[inline(always)]
pub fn sof(&self) -> SOF_R {
SOF_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 4 - CRC mode for incoming frames"]
#[inline(always)]
pub fn crcmoderx(&self) -> CRCMODERX_R {
CRCMODERX_R::new(((self.bits >> 4) & 0x01) != 0)
}
}
// Field writer proxies; each borrows `W` mutably and mutates only its bit.
impl W {
#[doc = "Bit 0 - Parity expected or not in RX frame"]
#[inline(always)]
pub fn parity(&mut self) -> PARITY_W {
PARITY_W { w: self }
}
#[doc = "Bit 2 - SoF expected or not in RX frames"]
#[inline(always)]
pub fn sof(&mut self) -> SOF_W {
SOF_W { w: self }
}
#[doc = "Bit 4 - CRC mode for incoming frames"]
#[inline(always)]
pub fn crcmoderx(&mut self) -> CRCMODERX_W {
CRCMODERX_W { w: self }
}
#[doc = "Writes raw bits to the register."]
// NOTE(review): `unsafe` here follows the usual svd2rust convention of
// allowing arbitrary (including reserved) bit patterns — confirm against
// the generated PAC documentation.
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Configuration of incoming frames\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [frameconfig](index.html) module"]
// Marker type tying the generic register machinery to this 32-bit register.
pub struct FRAMECONFIG_SPEC;
impl crate::RegisterSpec for FRAMECONFIG_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [frameconfig::R](R) reader structure"]
impl crate::Readable for FRAMECONFIG_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [frameconfig::W](W) writer structure"]
impl crate::Writable for FRAMECONFIG_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets FRAMECONFIG to value 0x15"]
// 0x15 = 0b1_0101: PARITY (bit 0), SOF (bit 2) and CRCMODERX (bit 4) all set.
impl crate::Resettable for FRAMECONFIG_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0x15
}
}
| 30.569364 | 424 | 0.579749 |
1cbb85d0f9282c33800c757aebf875f9a44eb598 | 9,348 | // DO NOT EDIT !
// This file was generated automatically from 'src/mako/api/lib.rs.mako'
// DO NOT EDIT !
//! This documentation was generated from *Firebase Cloud Messaging* crate version *2.0.8+20210329*, where *20210329* is the exact revision of the *fcm:v1* schema built by the [mako](http://www.makotemplates.org/) code generator *v2.0.8*.
//!
//! Everything else about the *Firebase Cloud Messaging* *v1* API can be found at the
//! [official documentation site](https://firebase.google.com/docs/cloud-messaging).
//! The original source code is [on github](https://github.com/Byron/google-apis-rs/tree/main/gen/fcm1).
//! # Features
//!
//! Handle the following *Resources* with ease from the central [hub](FirebaseCloudMessaging) ...
//!
//! * projects
//! * [*messages send*](api::ProjectMessageSendCall)
//!
//!
//!
//!
//! Not what you are looking for ? Find all other Google APIs in their Rust [documentation index](http://byron.github.io/google-apis-rs).
//!
//! # Structure of this Library
//!
//! The API is structured into the following primary items:
//!
//! * **[Hub](FirebaseCloudMessaging)**
//! * a central object to maintain state and allow accessing all *Activities*
//! * creates [*Method Builders*](client::MethodsBuilder) which in turn
//! allow access to individual [*Call Builders*](client::CallBuilder)
//! * **[Resources](client::Resource)**
//! * primary types that you can apply *Activities* to
//! * a collection of properties and *Parts*
//! * **[Parts](client::Part)**
//! * a collection of properties
//! * never directly used in *Activities*
//! * **[Activities](client::CallBuilder)**
//! * operations to apply to *Resources*
//!
//! All *structures* are marked with applicable traits to further categorize them and ease browsing.
//!
//! Generally speaking, you can invoke *Activities* like this:
//!
//! ```rust,ignore
//! let r = hub.resource().activity(...).doit().await
//! ```
//!
//! Or specifically ...
//!
//! ```ignore
//! let r = hub.projects().messages_send(...).doit().await
//! ```
//!
//! The `resource()` and `activity(...)` calls create [builders][builder-pattern]. The second one dealing with `Activities`
//! supports various methods to configure the impending operation (not shown here). It is made such that all required arguments have to be
//! specified right away (i.e. `(...)`), whereas all optional ones can be [build up][builder-pattern] as desired.
//! The `doit()` method performs the actual communication with the server and returns the respective result.
//!
//! # Usage
//!
//! ## Setting up your Project
//!
//! To use this library, you would put the following lines into your `Cargo.toml` file:
//!
//! ```toml
//! [dependencies]
//! google-fcm1 = "*"
//! hyper = "^0.14"
//! hyper-rustls = "^0.22"
//! serde = "^1.0"
//! serde_json = "^1.0"
//! yup-oauth2 = "^5.0"
//! ```
//!
//! ## A complete example
//!
//! ```test_harness,no_run
//! extern crate hyper;
//! extern crate hyper_rustls;
//! extern crate yup_oauth2 as oauth2;
//! extern crate google_fcm1 as fcm1;
//! use fcm1::api::SendMessageRequest;
//! use fcm1::{Result, Error};
//! # async fn dox() {
//! use std::default::Default;
//! use oauth2;
//! use fcm1::FirebaseCloudMessaging;
//!
//! // Get an ApplicationSecret instance by some means. It contains the `client_id` and
//! // `client_secret`, among other things.
//! let secret: oauth2::ApplicationSecret = Default::default();
//! // Instantiate the authenticator. It will choose a suitable authentication flow for you,
//! // unless you replace `None` with the desired Flow.
//! // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about
//! // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and
//! // retrieve them from storage.
//! let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
//! secret,
//! yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
//! ).build().await.unwrap();
//! let mut hub = FirebaseCloudMessaging::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
//! // As the method needs a request, you would usually fill it with the desired information
//! // into the respective structure. Some of the parts shown here might not be applicable !
//! // Values shown here are possibly random and not representative !
//! let mut req = SendMessageRequest::default();
//!
//! // You can configure optional parameters by calling the respective setters at will, and
//! // execute the final call using `doit()`.
//! // Values shown here are possibly random and not representative !
//! let result = hub.projects().messages_send(req, "parent")
//! .doit().await;
//!
//! match result {
//! Err(e) => match e {
//! // The Error enum provides details about what exactly happened.
//! // You can also just use its `Debug`, `Display` or `Error` traits
//! Error::HttpError(_)
//! |Error::Io(_)
//! |Error::MissingAPIKey
//! |Error::MissingToken(_)
//! |Error::Cancelled
//! |Error::UploadSizeLimitExceeded(_, _)
//! |Error::Failure(_)
//! |Error::BadRequest(_)
//! |Error::FieldClash(_)
//! |Error::JsonDecodeError(_, _) => println!("{}", e),
//! },
//! Ok(res) => println!("Success: {:?}", res),
//! }
//! # }
//! ```
//! ## Handling Errors
//!
//! All errors produced by the system are provided either as [Result](client::Result) enumeration as return value of
//! the doit() methods, or handed as possibly intermediate results to either the
//! [Hub Delegate](client::Delegate), or the [Authenticator Delegate](https://docs.rs/yup-oauth2/*/yup_oauth2/trait.AuthenticatorDelegate.html).
//!
//! When delegates handle errors or intermediate values, they may have a chance to instruct the system to retry. This
//! makes the system potentially resilient to all kinds of errors.
//!
//! ## Uploads and Downloads
//! If a method supports downloads, the response body, which is part of the [Result](client::Result), should be
//! read by you to obtain the media.
//! If such a method also supports a [Response Result](client::ResponseResult), it will return that by default.
//! You can see it as meta-data for the actual media. To trigger a media download, you will have to set up the builder by making
//! this call: `.param("alt", "media")`.
//!
//! Methods supporting uploads can do so using up to 2 different protocols:
//! *simple* and *resumable*. The distinctiveness of each is represented by customized
//! `doit(...)` methods, which are then named `upload(...)` and `upload_resumable(...)` respectively.
//!
//! ## Customization and Callbacks
//!
//! You may alter the way an `doit()` method is called by providing a [delegate](client::Delegate) to the
//! [Method Builder](client::CallBuilder) before making the final `doit()` call.
//! Respective methods will be called to provide progress information, as well as determine whether the system should
//! retry on failure.
//!
//! The [delegate trait](client::Delegate) is default-implemented, allowing you to customize it with minimal effort.
//!
//! ## Optional Parts in Server-Requests
//!
//! All structures provided by this library are made to be [encodable](client::RequestValue) and
//! [decodable](client::ResponseResult) via *json*. Optionals are used to indicate that partial requests and responses
//! are valid.
//! Most optionals are considered [Parts](client::Part) which are identifiable by name, which will be sent to
//! the server to indicate either the set parts of the request or the desired parts in the response.
//!
//! ## Builder Arguments
//!
//! Using [method builders](client::CallBuilder), you are able to prepare an action call by repeatedly calling it's methods.
//! These will always take a single argument, for which the following statements are true.
//!
//! * [PODs][wiki-pod] are handed by copy
//! * strings are passed as `&str`
//! * [request values](client::RequestValue) are moved
//!
//! Arguments will always be copied or cloned into the builder, to make them independent of their original life times.
//!
//! [wiki-pod]: http://en.wikipedia.org/wiki/Plain_old_data_structure
//! [builder-pattern]: http://en.wikipedia.org/wiki/Builder_pattern
//! [google-go-api]: https://github.com/google/google-api-go-client
//!
//!
// Unused attributes happen thanks to defined, but unused structures
// We don't warn about this, as depending on the API, some data structures or facilities are never used.
// Instead of pre-determining this, we just disable the lint. It's manually tuned to not have any
// unused imports in fully featured APIs. Same with unused_mut ... .
#![allow(unused_imports, unused_mut, dead_code)]
// DO NOT EDIT !
// This file was generated automatically from 'src/mako/api/lib.rs.mako'
// DO NOT EDIT !
#[macro_use]
extern crate serde_derive;
extern crate hyper;
extern crate serde;
extern crate serde_json;
extern crate yup_oauth2 as oauth2;
extern crate mime;
extern crate url;
pub mod api;
pub mod client;
// Re-export the hub type and some basic client structs
pub use api::FirebaseCloudMessaging;
pub use client::{Result, Error, Delegate};
| 44.303318 | 238 | 0.682606 |
5dd3faa67cca3c6992a1ea90e51f92bd8003f9da | 15,173 | use anyhow::{self, bail, Context as _};
use cargo_about::licenses;
use cargo_about::licenses::LicenseInfo;
use codespan_reporting::term;
use handlebars::Handlebars;
use krates::cm::Package;
use krates::{Utf8Path as Path, Utf8PathBuf as PathBuf};
use serde::Serialize;
use std::collections::BTreeMap;
// CLI arguments for the `generate` subcommand.
// NOTE: the `///` doc comments on the fields below double as the
// clap-generated `--help` text, so wording changes are user-visible.
#[derive(clap::Parser, Debug)]
pub struct Args {
/// Path to the config to use
///
/// Defaults to <manifest_root>/about.toml if not specified
#[clap(short, long)]
config: Option<PathBuf>,
/// The confidence threshold required for license files
/// to be positively identified: 0.0 - 1.0
#[clap(long, default_value = "0.8")]
threshold: f32,
/// The name of the template to use when rendering. If only passing a
/// single template file to `templates` this is not used.
#[clap(short, long)]
name: Option<String>,
/// A file to write the generated output to. Typically an .html file.
#[clap(short, long)]
output_file: Option<PathBuf>,
/// Space-separated list of features to activate
#[clap(long)]
features: Vec<String>,
/// Activate all available features
#[clap(long)]
all_features: bool,
/// Do not activate the `default` feature
#[clap(long)]
no_default_features: bool,
/// The path of the Cargo.toml for the root crate, defaults to the
/// current crate or workspace in the current working directory
#[clap(short, long)]
manifest_path: Option<PathBuf>,
/// Scan licenses for the entire workspace, not just the active package
#[clap(long)]
workspace: bool,
// Positional argument (no `#[clap(short/long)]`).
/// The template(s) or template directory to use. Must either be a `.hbs`
/// file, or have at least one `.hbs` file in it if it is a directory
templates: PathBuf,
}
fn load_config(manifest_path: &Path) -> anyhow::Result<cargo_about::licenses::config::Config> {
let mut parent = manifest_path.parent();
// Move up directories until we find an about.toml, to handle
// cases where eg in a workspace there is a top-level about.toml
// but the user is only getting a listing for a particular crate from it
while let Some(p) = parent {
// We _could_ limit ourselves to only directories that also have a Cargo.toml
// in them, but there could be cases where someone has multiple
// rust projects in subdirectories with a single top level about.toml that is
// used across all of them, we could also introduce a metadata entry for the
// relative path of the about.toml to use for the crate/workspace
// if !p.join("Cargo.toml").exists() {
// parent = p.parent();
// continue;
// }
let about_toml = p.join("about.toml");
if about_toml.exists() {
let contents = std::fs::read_to_string(&about_toml)?;
let cfg = toml::from_str(&contents)?;
log::info!("loaded config from {}", about_toml);
return Ok(cfg);
}
parent = p.parent();
}
log::warn!("no 'about.toml' found, falling back to default configuration");
Ok(cargo_about::licenses::config::Config::default())
}
/// Entry point of the `generate` subcommand: resolves the manifest and
/// configuration, gathers crates and their license texts, renders the
/// Handlebars template and writes the result to stdout or `--output-file`.
pub fn cmd(args: Args, color: crate::Color) -> anyhow::Result<()> {
// Resolve the manifest path, defaulting to ./Cargo.toml in the cwd.
let manifest_path = match args.manifest_path.clone() {
Some(mp) => mp,
None => {
let cwd =
std::env::current_dir().context("unable to determine current working directory")?;
let mut cwd = PathBuf::from_path_buf(cwd).map_err(|pb| {
anyhow::anyhow!(
"current working directory '{}' is not a utf-8 path",
pb.display()
)
})?;
cwd.push("Cargo.toml");
cwd
}
};
if !manifest_path.exists() {
bail!("cargo manifest path '{}' does not exist", manifest_path);
}
// Load the config: either the explicit --config file, or the nearest
// about.toml discovered by walking up from the manifest.
let cfg = match &args.config {
Some(cfg_path) => {
let cfg_str = std::fs::read_to_string(cfg_path)
.with_context(|| format!("unable to read {}", cfg_path))?;
toml::from_str(&cfg_str)
.with_context(|| format!("unable to deserialize config from {}", cfg_path))?
}
None => load_config(&manifest_path)?,
};
// Crate metadata gathering and license-store loading are independent, so
// run them in parallel.
let (all_crates, store) = rayon::join(
|| {
log::info!("gathering crates for {}", manifest_path);
cargo_about::get_all_crates(
&manifest_path,
args.no_default_features,
args.all_features,
args.features.clone(),
args.workspace,
&cfg,
)
},
|| {
log::info!("loading license store");
cargo_about::licenses::store_from_cache()
},
);
let krates = all_crates?;
let store = store?;
log::info!("gathered {} crates", krates.len());
// Set up the Handlebars registry and figure out which template to render.
let (registry, template) = {
let mut reg = Handlebars::new();
if !args.templates.exists() {
bail!("template(s) path {} does not exist", args.templates);
}
use handlebars::*;
// `{{json value}}` helper: pretty-prints any template value as JSON.
reg.register_helper(
"json",
Box::new(
|h: &Helper<'_, '_>,
_r: &Handlebars<'_>,
_: &Context,
_rc: &mut RenderContext<'_, '_>,
out: &mut dyn Output|
-> HelperResult {
let param = h
.param(0)
.ok_or_else(|| RenderError::new("param not found"))?;
out.write(&serde_json::to_string_pretty(param.value())?)?;
Ok(())
},
),
);
if args.templates.is_dir() {
reg.register_templates_directory(".hbs", &args.templates)?;
if reg.get_templates().is_empty() {
bail!(
"template path {} did not contain any hbs files",
args.templates
);
}
// A directory of templates requires --name to pick one.
(reg, args.name.context("specified a directory for templates, but did not provide the name of the template to use")?)
} else {
// Ignore the extension, if the user says they want to use a specific file, that's on them
reg.register_template_file("tmpl", args.templates)?;
(reg, "tmpl".to_owned())
}
};
// Scan every crate for license information and resolve it against the
// accepted-licenses configuration.
let client = cd::client::Client::new();
let summary = licenses::Gatherer::with_store(std::sync::Arc::new(store), client)
.with_confidence_threshold(args.threshold)
.gather(&krates, &cfg);
let (files, resolved) = licenses::resolution::resolve(&summary, &cfg.accepted, &cfg.crates);
// Diagnostics go to stderr; honor the requested color mode.
use term::termcolor::ColorChoice;
let stream = term::termcolor::StandardStream::stderr(match color {
crate::Color::Auto => {
// The termcolor crate doesn't check the stream to see if it's a TTY
// which doesn't really fit with how the rest of the coloring works
if atty::is(atty::Stream::Stderr) {
ColorChoice::Auto
} else {
ColorChoice::Never
}
}
crate::Color::Always => ColorChoice::Always,
crate::Color::Never => ColorChoice::Never,
});
let output = generate(&summary, &resolved, &files, stream, &registry, &template)?;
// "-" as the output file is treated the same as stdout.
match args.output_file.as_ref() {
None => println!("{}", output),
Some(path) if path == Path::new("-") => println!("{}", output),
Some(path) => {
std::fs::write(path, output)
.with_context(|| format!("output file {} could not be written", path))?;
}
}
Ok(())
}
/// Template input: one crate that uses a given license, plus the path the
/// license text was found at within that crate (if any).
#[derive(Clone, Serialize)]
struct UsedBy<'a> {
#[serde(rename = "crate")]
krate: &'a krates::cm::Package,
path: Option<PathBuf>,
}
/// Template input: a single license text together with the crates it covers.
#[derive(Clone, Serialize)]
struct License<'a> {
/// The full name of the license
name: String,
/// The SPDX short identifier for the license
id: String,
/// The full license text
text: String,
/// The path where the license text was sourced from
source_path: Option<PathBuf>,
/// The list of crates this license was applied to
used_by: Vec<UsedBy<'a>>,
}
/// Template input: overview row aggregating all occurrences of one license.
#[derive(Serialize)]
struct LicenseSet {
/// Total number of uses across all crates.
count: usize,
/// The full name of the license.
name: String,
/// The SPDX short identifier for the license.
id: String,
/// Indices into the template's `licenses` array for this license.
indices: Vec<usize>,
/// The license text.
text: String,
}
/// Root context handed to the Handlebars template.
#[derive(Serialize)]
struct Input<'a> {
/// Per-license aggregate rows.
overview: Vec<LicenseSet>,
/// Every individual license text with its users.
licenses: Vec<License<'a>>,
/// Per-crate license assignments.
crates: Vec<PackageLicense<'a>>,
}
/// Renders the license report by feeding the collected license data through
/// the handlebars template `template_name`.
///
/// License-resolution diagnostics are emitted to `stream` first; if any of
/// them is an error the function bails before rendering.
fn generate(
    nfos: &[licenses::KrateLicense<'_>],
    resolved: &[Option<licenses::Resolved>],
    files: &licenses::resolution::Files,
    stream: term::termcolor::StandardStream,
    hbs: &Handlebars<'_>,
    template_name: &str,
) -> anyhow::Result<String> {
    use cargo_about::licenses::resolution::Severity;
    let mut num_errors = 0;
    let diag_cfg = term::Config::default();
    let licenses = {
        // Nested map: license name -> (license text -> License). This dedupes
        // identical texts while keeping distinct texts for the same license.
        let mut licenses = BTreeMap::new();
        for (krate_license, resolved) in nfos
            .iter()
            .zip(resolved.iter())
            .filter_map(|(kl, res)| res.as_ref().map(|res| (kl, res)))
        {
            // Emit any diagnostics attached to this crate's resolution,
            // counting errors so we can abort after the full report.
            if !resolved.diagnostics.is_empty() {
                let mut streaml = stream.lock();
                for diag in &resolved.diagnostics {
                    if diag.severity >= Severity::Error {
                        num_errors += 1;
                    }
                    term::emit(&mut streaml, &diag_cfg, files, diag)?;
                }
            }
            let license_iter = resolved.licenses.iter().flat_map(|license| {
                let mut license_texts = Vec::new();
                match license.license {
                    spdx::LicenseItem::Spdx { id, .. } => {
                        // Attempt to retrieve the actual license file from the crate, note that in some cases
                        // _sigh_ there are actually multiple license texts for the same license with different
                        // copyright holders/authors/attribution so we can't just return 1
                        license_texts.extend(krate_license
                            .license_files
                            .iter()
                            .filter_map(|lf| {
                                // Check if this is the actual license file we want
                                if !lf
                                    .license_expr
                                    .evaluate(|ereq| ereq.license.id() == Some(id))
                                {
                                    return None;
                                }
                                match &lf.kind {
                                    licenses::LicenseFileKind::Text(text)
                                    | licenses::LicenseFileKind::AddendumText(text, _) => {
                                        let license = License {
                                            name: id.full_name.to_owned(),
                                            id: id.name.to_owned(),
                                            text: text.clone(),
                                            source_path: Some(lf.path.clone()),
                                            used_by: Vec::new(),
                                        };
                                        Some(license)
                                    }
                                    // Header snippets carry no standalone text.
                                    licenses::LicenseFileKind::Header => None,
                                }
                            }));
                        if license_texts.is_empty() {
                            log::debug!(
                                "unable to find text for license '{}' for crate '{}', falling back to canonical text",
                                license,
                                krate_license.krate
                            );
                            // If the crate doesn't have the actual license file,
                            // fallback to the canonical license text and emit a warning
                            license_texts.push(License {
                                name: id.full_name.to_owned(),
                                id: id.name.to_owned(),
                                text: id.text().to_owned(),
                                source_path: None,
                                used_by: Vec::new(),
                            });
                        }
                    }
                    spdx::LicenseItem::Other { .. } => {
                        log::warn!(
                            "{} has no license file for crate '{}'",
                            license,
                            krate_license.krate
                        );
                    }
                }
                license_texts
            });
            for license in license_iter {
                let entry = licenses
                    .entry(license.name.clone())
                    .or_insert_with(BTreeMap::new);
                let lic = entry.entry(license.text.clone()).or_insert_with(|| license);
                // Record the crate even when the text was already present.
                lic.used_by.push(UsedBy {
                    krate: krate_license.krate,
                    path: None,
                });
            }
        }
        let mut licenses: Vec<_> = licenses
            .into_iter()
            .flat_map(|(_, v)| v.into_iter().map(|(_, v)| v))
            .collect();
        // Sort the krates that use a license lexicographically
        for lic in &mut licenses {
            lic.used_by.sort_by(|a, b| a.krate.id.cmp(&b.krate.id));
        }
        licenses.sort_by(|a, b| a.id.cmp(&b.id));
        licenses
    };
    if num_errors > 0 {
        anyhow::bail!(
            "encountered {} errors resolving licenses, unable to generate output",
            num_errors
        );
    }
    // Build the overview, one entry per SPDX id, kept sorted by id while
    // inserting so we can binary-search for existing entries.
    let mut overview: Vec<LicenseSet> = Vec::with_capacity(256);
    for (ndx, lic) in licenses.iter().enumerate() {
        match overview.binary_search_by(|i| i.id.cmp(&lic.id)) {
            Ok(i) => overview[i].indices.push(ndx),
            Err(i) => {
                let mut ls = LicenseSet {
                    count: 0,
                    name: lic.name.clone(),
                    id: lic.id.clone(),
                    indices: Vec::with_capacity(10),
                    text: lic.text.clone(),
                };
                ls.indices.push(ndx);
                overview.insert(i, ls);
            }
        }
    }
    overview.iter_mut().for_each(|i| i.count = i.indices.len());
    // Show the most used licenses first
    overview.sort_by(|a, b| b.count.cmp(&a.count));
    let crates = nfos
        .iter()
        .filter(|nfo| !matches!(nfo.lic_info, LicenseInfo::Ignore))
        .map(|nfo| PackageLicense {
            package: &nfo.krate.0,
            license: nfo.lic_info.to_string(),
        })
        .collect();
    let nput = Input {
        overview,
        licenses,
        crates,
    };
    Ok(hbs.render(template_name, &nput)?)
}
/// A crate paired with its overall license expression, for the template's
/// per-crate listing.
#[derive(Serialize)]
struct PackageLicense<'a> {
    package: &'a Package,
    license: String,
}
| 34.562642 | 129 | 0.501812 |
7151797a824fa71ad00ee2129f1809f83b756876 | 3,580 | #![no_std]
#![feature(asm)]
#![feature(global_asm)]
#![deny(warnings, unused_must_use)]
extern crate alloc;
#[macro_use]
extern crate log;
use {
alloc::{boxed::Box, string::String, sync::Arc, vec::Vec},
kernel_hal::{GeneralRegs, MMUFlags},
linux_object::{
fs::{vfs::FileSystem, INodeExt},
loader::LinuxElfLoader,
process::ProcessExt,
thread::ThreadExt,
},
linux_syscall::Syscall,
zircon_object::task::*,
};
/// Create a Linux process and main thread from the ELF named by `args[0]`
/// inside `rootfs`, then start the thread.
///
/// Returns the newly created process; execution proceeds asynchronously via
/// `spawn`. Panics (via `unwrap`/`expect`) if the binary cannot be found,
/// read, loaded, or started.
pub fn run(args: Vec<String>, envs: Vec<String>, rootfs: Arc<dyn FileSystem>) -> Arc<Process> {
    let job = Job::root();
    let proc = Process::create_linux(&job, rootfs.clone()).unwrap();
    let thread = Thread::create_linux(&proc).unwrap();
    let loader = LinuxElfLoader {
        #[cfg(feature = "std")]
        syscall_entry: kernel_hal_unix::syscall_entry as usize,
        #[cfg(not(feature = "std"))]
        syscall_entry: 0,
        stack_pages: 8,
        root_inode: rootfs.root_inode(),
    };
    // Look up and read the executable from the root filesystem.
    let inode = rootfs.root_inode().lookup(&args[0]).unwrap();
    let data = inode.read_as_vec().unwrap();
    let (entry, sp) = loader.load(&proc.vmar(), &data, args, envs).unwrap();
    thread
        .start(entry, sp, 0, 0, spawn)
        .expect("failed to start main thread");
    proc
}
fn spawn(thread: Arc<Thread>) {
let vmtoken = thread.proc().vmar().table_phys();
let future = async move {
loop {
let mut cx = thread.wait_for_run().await;
trace!("go to user: {:#x?}", cx);
kernel_hal::context_run(&mut cx);
trace!("back from user: {:#x?}", cx);
let mut exit = false;
match cx.trap_num {
0x100 => exit = handle_syscall(&thread, &mut cx.general).await,
0x20..=0x3f => {
kernel_hal::InterruptManager::handle(cx.trap_num as u8);
if cx.trap_num == 0x20 {
kernel_hal::yield_now().await;
}
}
0xe => {
let vaddr = kernel_hal::fetch_fault_vaddr();
let flags = if cx.error_code & 0x2 == 0 {
MMUFlags::READ
} else {
MMUFlags::WRITE
};
error!("page fualt from user mode {:#x} {:#x?}", vaddr, flags);
let vmar = thread.proc().vmar();
match vmar.handle_page_fault(vaddr, flags) {
Ok(()) => {}
Err(_) => {
panic!("Page Fault from user mode {:#x?}", cx);
}
}
}
_ => panic!("not supported interrupt from user mode. {:#x?}", cx),
}
thread.end_running(cx);
if exit {
break;
}
}
};
kernel_hal::Thread::spawn(Box::pin(future), vmtoken);
}
/// Decode the syscall number and arguments from the saved registers,
/// dispatch through `linux_syscall`, and write the return value into `rax`.
///
/// Returns `true` when the syscall requested thread exit.
async fn handle_syscall(thread: &Arc<Thread>, regs: &mut GeneralRegs) -> bool {
    trace!("syscall: {:#x?}", regs);
    let num = regs.rax as u32;
    // Argument registers in x86-64 Linux syscall ABI order.
    let args = [regs.rdi, regs.rsi, regs.rdx, regs.r10, regs.r8, regs.r9];
    let mut syscall = Syscall {
        thread,
        #[cfg(feature = "std")]
        syscall_entry: kernel_hal_unix::syscall_entry as usize,
        #[cfg(not(feature = "std"))]
        syscall_entry: 0,
        spawn_fn: spawn,
        regs,
        exit: false,
    };
    let ret = syscall.syscall(num, args).await;
    let exit = syscall.exit;
    regs.rax = ret as usize;
    exit
}
| 33.148148 | 95 | 0.505866 |
38d5bfbce430d7ba7e70c57d2e5e3e62acbe408a | 54,916 | //! This module implements the API access to the Sentry API as well
//! as some other APIs we interact with. In particular it can talk
//! to the GitHub API to figure out if there are new releases of the
//! sentry-cli tool.
use std::borrow::Cow;
use std::cell::{RefCell, RefMut};
use std::cmp;
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::fs;
use std::io::{self, Read, Write};
use std::path::Path;
use std::rc::Rc;
use std::str;
use std::sync::Arc;
use std::thread;
use brotli2::write::BrotliEncoder;
use chrono::{DateTime, Duration, Utc};
use curl;
use failure::{Backtrace, Context, Error, Fail, ResultExt};
use flate2::write::GzEncoder;
use indicatif::ProgressBar;
use parking_lot::RwLock;
use regex::{Captures, Regex};
// use sentry::{Dsn, protocol::Event};
use serde::de::{Deserialize, DeserializeOwned, Deserializer};
use serde::Serialize;
use serde_json;
use sha1::Digest;
use symbolic::debuginfo::DebugId;
use url::percent_encoding::{utf8_percent_encode, DEFAULT_ENCODE_SET};
use config::{Auth, Config};
use constants::{ARCH, EXT, PLATFORM, VERSION};
use utils::android::AndroidManifest;
use utils::sourcemaps::get_sourcemap_reference_from_headers;
use utils::ui::{capitalize_string, make_byte_progress_bar};
use utils::xcode::InfoPlist;
/// Wrapper that escapes arguments for URL path segments.
pub struct PathArg<A: fmt::Display>(A);

// One `Api` per thread so the curl handle (and HTTP keepalive) is reused.
thread_local! {
    static API: Rc<Api> = Rc::new(Api::new());
}

impl<A: fmt::Display> fmt::Display for PathArg<A> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // if we put values into the path we need to url encode them. However
        // special care needs to be taken for any slash character or path
        // segments that would end up as ".." or "." for security reasons.
        // Since we cannot handle slashes there we just replace them with the
        // unicode replacement character as a quick workaround. This will
        // typically result in 404s from the server.
        let mut val = format!("{}", self.0).replace('/', "\u{fffd}");
        if val == ".." || val == "." {
            val = "\u{fffd}".into();
        }
        utf8_percent_encode(&val, DEFAULT_ENCODE_SET).fmt(f)
    }
}
/// Controls which transfer direction(s) display a progress bar.
pub enum ProgressBarMode {
    /// No progress bar at all.
    Disabled,
    /// Show progress while uploading the request body.
    Request,
    /// Show progress while downloading the response body.
    Response,
    /// Show progress in both directions.
    Both,
    /// Report into a shared bar: (bar, total length, slot index,
    /// per-slot progress counts) — see `handle_req`.
    Shared((Arc<ProgressBar>, u64, usize, Arc<RwLock<Vec<u64>>>)),
}
impl ProgressBarMode {
    /// Returns if progress bars are generally enabled.
    pub fn active(&self) -> bool {
        !matches!(*self, ProgressBarMode::Disabled)
    }

    /// Returns whether a progress bar should be displayed for during upload.
    pub fn request(&self) -> bool {
        matches!(*self, ProgressBarMode::Request | ProgressBarMode::Both)
    }

    /// Returns whether a progress bar should be displayed for during download.
    pub fn response(&self) -> bool {
        matches!(*self, ProgressBarMode::Response | ProgressBarMode::Both)
    }
}
/// Helper for the API access.
pub struct Api {
    // Shared CLI configuration (endpoint, auth, proxy, SSL settings).
    config: Arc<Config>,
    // Reused curl handle so HTTP keepalive works across requests.
    shared_handle: RefCell<curl::easy::Easy>,
}

/// Represents file contents temporarily
pub enum FileContents<'a> {
    /// Read the payload from a file on disk.
    FromPath(&'a Path),
    /// Use an in-memory byte buffer as the payload.
    FromBytes(&'a [u8]),
}
/// An error reported by the Sentry server itself (non-2xx response).
#[derive(Debug, Fail)]
pub struct SentryError {
    // HTTP status code of the failed response.
    status: u32,
    // Optional `detail` string from the JSON error body.
    detail: Option<String>,
    // Any additional JSON the server attached to the error.
    extra: Option<serde_json::Value>,
}
impl fmt::Display for SentryError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Fall back to a generic description when the server sent no detail.
        let fallback = match self.status {
            400 => "bad request",
            401 => "unauthorized",
            404 => "not found",
            500 => "internal server error",
            _ => "unknown error",
        };
        let detail = match self.detail {
            Some(ref detail) if !detail.is_empty() => detail.as_str(),
            _ => fallback,
        };
        write!(
            f,
            "sentry reported an error: {} (http status: {})",
            detail, self.status
        )?;
        // Append any extra payload the server included.
        if let Some(ref extra) = self.extra {
            write!(f, "\n {:?}", extra)?;
        }
        Ok(())
    }
}
/// Represents API errors.
///
/// Each variant carries its user-facing description through `failure`'s
/// `#[fail(display = ...)]` attribute.
#[derive(Copy, Clone, Eq, PartialEq, Debug, Fail)]
pub enum ApiErrorKind {
    #[fail(display = "could not serialize value as JSON")]
    CannotSerializeAsJson,
    #[fail(display = "could not parse JSON response")]
    BadJson,
    #[fail(display = "not a JSON response")]
    NotJson,
    #[fail(display = "no DSN")]
    NoDsn,
    #[fail(display = "request failed because API URL was incorrectly formatted")]
    BadApiUrl,
    #[fail(display = "organization not found")]
    OrganizationNotFound,
    #[fail(display = "project not found")]
    ProjectNotFound,
    #[fail(display = "release not found")]
    ReleaseNotFound,
    #[fail(display = "chunk upload endpoint not supported by sentry server")]
    ChunkUploadNotSupported,
    #[fail(display = "API request failed")]
    RequestFailed,
    #[fail(display = "could not compress data")]
    CompressionFailed,
}
/// Concrete API error: an `ApiErrorKind` plus failure context
/// (cause chain and backtrace).
#[derive(Debug)]
pub struct ApiError {
    inner: Context<ApiErrorKind>,
}

/// Raised when the server reports that a project slug has changed.
#[derive(Clone, Debug, Fail)]
#[fail(
    display = "project was renamed to '{}'\n\nPlease use this slug in your .sentryclirc or sentry.properties or for the --project parameter",
    _0
)]
pub struct ProjectRenamedError(String);

impl Fail for ApiError {
    fn cause(&self) -> Option<&Fail> {
        self.inner.cause()
    }

    fn backtrace(&self) -> Option<&Backtrace> {
        self.inner.backtrace()
    }
}

impl fmt::Display for ApiError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&self.inner, f)
    }
}

impl ApiError {
    /// Returns the kind classifying this failure.
    pub fn kind(&self) -> ApiErrorKind {
        *self.inner.get_context()
    }
}

impl From<ApiErrorKind> for ApiError {
    fn from(kind: ApiErrorKind) -> ApiError {
        ApiError {
            inner: Context::new(kind),
        }
    }
}

impl From<Context<ApiErrorKind>> for ApiError {
    fn from(inner: Context<ApiErrorKind>) -> ApiError {
        ApiError { inner }
    }
}

// All curl-level failures are folded into the generic `RequestFailed` kind.
impl From<curl::Error> for ApiError {
    fn from(err: curl::Error) -> ApiError {
        Error::from(err).context(ApiErrorKind::RequestFailed).into()
    }
}

impl From<curl::FormError> for ApiError {
    fn from(err: curl::FormError) -> ApiError {
        Error::from(err).context(ApiErrorKind::RequestFailed).into()
    }
}

/// Shortcut alias for results of this module.
pub type ApiResult<T> = Result<T, ApiError>;
/// Represents an HTTP method that is used by the API.
#[derive(PartialEq, Debug)]
pub enum Method {
    Get,
    Head,
    Post,
    Put,
    Delete,
}

impl fmt::Display for Method {
    /// Writes the method as its uppercase HTTP verb.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let verb = match *self {
            Method::Get => "GET",
            Method::Head => "HEAD",
            Method::Post => "POST",
            Method::Put => "PUT",
            Method::Delete => "DELETE",
        };
        write!(f, "{}", verb)
    }
}
/// Represents an API request. This can be customized before
/// sending but only sent once.
pub struct ApiRequest<'a> {
    // Exclusive borrow of the shared curl handle for the request's lifetime.
    handle: RefMut<'a, curl::easy::Easy>,
    headers: curl::easy::List,
    body: Option<Vec<u8>>,
    progress_bar_mode: ProgressBarMode,
}

/// Represents an API response.
#[derive(Clone, Debug)]
pub struct ApiResponse {
    // HTTP status code.
    status: u32,
    // Raw header lines exactly as received.
    headers: Vec<String>,
    body: Option<Vec<u8>>,
}
impl Api {
    /// Creates a new API access helper. For as long as it lives HTTP
    /// keepalive can be used. When the object is recreated new
    /// connections will be established.
    pub fn new() -> Api {
        Api::with_config(Config::get_current())
    }

    /// Returns the current api for the thread.
    ///
    /// Cached in a thread local, so all callers on one thread share the
    /// same curl handle.
    pub fn get_current() -> Rc<Api> {
        API.with(|api| api.clone())
    }

    /// Similar to `new` but uses a specific config.
    pub fn with_config(config: Arc<Config>) -> Api {
        Api {
            config: config,
            shared_handle: RefCell::new(curl::easy::Easy::new()),
        }
    }
    // Low Level Methods

    /// Create a new `ApiRequest` for the given HTTP method and URL. If the
    /// URL is just a path then it's relative to the configured API host
    /// and authentication is automatically enabled.
    pub fn request<'a>(&'a self, method: Method, url: &str) -> ApiResult<ApiRequest<'a>> {
        let mut handle = self.shared_handle.borrow_mut();
        handle.reset();
        if !self.config.allow_keepalive() {
            handle.forbid_reuse(true).ok();
        }
        let mut ssl_opts = curl::easy::SslOpt::new();
        if self.config.disable_ssl_revocation_check() {
            ssl_opts.no_revoke(true);
        }
        handle.ssl_options(&ssl_opts)?;
        // Absolute URLs are used verbatim and get no auth header; relative
        // paths are resolved against the configured API endpoint with auth.
        let (url, auth) = if url.starts_with("http://") || url.starts_with("https://") {
            (Cow::Borrowed(url), None)
        } else {
            (
                Cow::Owned(match self.config.get_api_endpoint(url) {
                    Ok(rv) => rv,
                    Err(err) => {
                        return Err(Error::from(err).context(ApiErrorKind::BadApiUrl).into())
                    }
                }),
                self.config.get_auth(),
            )
        };
        // the proxy url is discovered from the http_proxy envvar.
        if let Some(proxy_username) = self.config.get_proxy_username() {
            handle.proxy_username(proxy_username)?;
        }
        if let Some(proxy_password) = self.config.get_proxy_password() {
            handle.proxy_password(proxy_password)?;
        }
        handle.ssl_verify_host(self.config.should_verify_ssl())?;
        handle.ssl_verify_peer(self.config.should_verify_ssl())?;
        // This toggles gzipping, useful for uploading large files
        handle.transfer_encoding(self.config.allow_transfer_encoding())?;
        ApiRequest::new(handle, method, &url, auth)
    }
    /// Convenience method that performs a `GET` request.
    pub fn get(&self, path: &str) -> ApiResult<ApiResponse> {
        self.request(Method::Get, path)?.send()
    }

    /// Convenience method that performs a `DELETE` request.
    pub fn delete(&self, path: &str) -> ApiResult<ApiResponse> {
        self.request(Method::Delete, path)?.send()
    }

    /// Convenience method that performs a `POST` request with JSON data.
    pub fn post<S: Serialize>(&self, path: &str, body: &S) -> ApiResult<ApiResponse> {
        self.request(Method::Post, path)?
            .with_json_body(body)?
            .send()
    }

    /// Convenience method that performs a `PUT` request with JSON data.
    pub fn put<S: Serialize>(&self, path: &str, body: &S) -> ApiResult<ApiResponse> {
        self.request(Method::Put, path)?
            .with_json_body(body)?
            .send()
    }
    /// Convenience method that downloads a file into the given file object.
    ///
    /// Redirects are followed.
    pub fn download(&self, url: &str, dst: &mut fs::File) -> ApiResult<ApiResponse> {
        self.request(Method::Get, &url)?
            .follow_location(true)?
            .send_into(dst)
    }

    /// Convenience method that downloads a file into the given file object
    /// and show a progress bar
    pub fn download_with_progress(&self, url: &str, dst: &mut fs::File) -> ApiResult<ApiResponse> {
        self.request(Method::Get, &url)?
            .follow_location(true)?
            .progress_bar_mode(ProgressBarMode::Response)?
            .send_into(dst)
    }
    /// Convenience method that waits for a few seconds until a resource
    /// becomes available.
    ///
    /// Polls `url` every 500ms until a request succeeds or `duration` has
    /// elapsed; returns `Ok(false)` on timeout. Non-`RequestFailed` errors
    /// are propagated immediately.
    pub fn wait_until_available(&self, url: &str, duration: Duration) -> ApiResult<bool> {
        let started = Utc::now();
        loop {
            match self.request(Method::Get, &url)?.send() {
                Ok(_) => return Ok(true),
                Err(err) => {
                    if err.kind() != ApiErrorKind::RequestFailed {
                        return Err(err);
                    }
                }
            }
            thread::sleep(Duration::milliseconds(500).to_std().unwrap());
            if Utc::now() - duration > started {
                return Ok(false);
            }
        }
    }
    // High Level Methods

    /// Performs an API request to verify the authentication status of the
    /// current token.
    pub fn get_auth_info(&self) -> ApiResult<AuthInfo> {
        self.get("/")?.convert()
    }

    /// Lists all the release file for the given `release`.
    ///
    /// With a `project` the legacy project-scoped endpoint is used,
    /// otherwise the org-level endpoint.
    pub fn list_release_files(
        &self,
        org: &str,
        project: Option<&str>,
        release: &str,
    ) -> ApiResult<Vec<Artifact>> {
        let path = if let Some(project) = project {
            format!(
                "/projects/{}/{}/releases/{}/files/",
                PathArg(org),
                PathArg(project),
                PathArg(release)
            )
        } else {
            format!(
                "/organizations/{}/releases/{}/files/",
                PathArg(org),
                PathArg(release)
            )
        };
        self.get(&path)?.convert_rnf(ApiErrorKind::ReleaseNotFound)
    }
    /// Deletes a single release file.  Returns `true` if the file was
    /// deleted or `false` otherwise.
    pub fn delete_release_file(
        &self,
        org: &str,
        project: Option<&str>,
        version: &str,
        file_id: &str,
    ) -> ApiResult<bool> {
        let path = if let Some(project) = project {
            format!(
                "/projects/{}/{}/releases/{}/files/{}/",
                PathArg(org),
                PathArg(project),
                PathArg(version),
                PathArg(file_id)
            )
        } else {
            format!(
                "/organizations/{}/releases/{}/files/{}/",
                PathArg(org),
                PathArg(version),
                PathArg(file_id)
            )
        };
        let resp = self.delete(&path)?;
        // A 404 means the file was already gone; report "not deleted".
        if resp.status() == 404 {
            Ok(false)
        } else {
            resp.to_result().map(|_| true)
        }
    }
    /// Uploads a new release file.  The file is loaded directly from the file
    /// system and uploaded as `name`.
    ///
    /// Returns `Ok(None)` when the server answers 409 (a file with that
    /// name already exists for the release).
    pub fn upload_release_file(
        &self,
        org: &str,
        project: Option<&str>,
        version: &str,
        contents: FileContents,
        name: &str,
        dist: Option<&str>,
        headers: Option<&[(String, String)]>,
    ) -> ApiResult<Option<Artifact>> {
        let path = if let Some(project) = project {
            format!(
                "/projects/{}/{}/releases/{}/files/",
                PathArg(org),
                PathArg(project),
                PathArg(version)
            )
        } else {
            format!(
                "/organizations/{}/releases/{}/files/",
                PathArg(org),
                PathArg(version)
            )
        };
        let mut form = curl::easy::Form::new();
        match contents {
            FileContents::FromPath(path) => {
                form.part("file").file(path).add()?;
            }
            FileContents::FromBytes(bytes) => {
                // Derive a sensible filename from `name` for the form part.
                let filename = Path::new(name)
                    .file_name()
                    .and_then(|x| x.to_str())
                    .unwrap_or("unknown.bin");
                form.part("file").buffer(filename, bytes.to_vec()).add()?;
            }
        }
        form.part("name").contents(name.as_bytes()).add()?;
        if let Some(dist) = dist {
            form.part("dist").contents(dist.as_bytes()).add()?;
        }
        // Extra artifact headers are sent as repeated "header" form parts.
        if let Some(headers) = headers {
            for &(ref key, ref value) in headers {
                form.part("header")
                    .contents(format!("{}:{}", key, value).as_bytes())
                    .add()?;
            }
        }
        let resp = self.request(Method::Post, &path)?
            .with_form_data(form)?
            .progress_bar_mode(ProgressBarMode::Request)?
            .send()?;
        if resp.status() == 409 {
            Ok(None)
        } else {
            resp.convert_rnf(ApiErrorKind::ReleaseNotFound)
        }
    }
    /// Creates a new release.
    pub fn new_release(&self, org: &str, release: &NewRelease) -> ApiResult<ReleaseInfo> {
        // for single project releases use the legacy endpoint that is project bound.
        // This means we can support both old and new servers.
        if release.projects.len() == 1 {
            let path = format!(
                "/projects/{}/{}/releases/",
                PathArg(org),
                PathArg(&release.projects[0])
            );
            self.post(&path, release)?
                .convert_rnf(ApiErrorKind::ProjectNotFound)
        } else {
            let path = format!("/organizations/{}/releases/", PathArg(org));
            self.post(&path, release)?
                .convert_rnf(ApiErrorKind::OrganizationNotFound)
        }
    }
    /// Updates a release.
    ///
    /// Like `new_release`, a single-project update goes through the legacy
    /// project-bound endpoint for old-server compatibility.
    pub fn update_release(
        &self,
        org: &str,
        version: &str,
        release: &UpdatedRelease,
    ) -> ApiResult<ReleaseInfo> {
        if_chain! {
            if let Some(ref projects) = release.projects;
            if projects.len() == 1;
            then {
                let path = format!("/projects/{}/{}/releases/{}/",
                    PathArg(org),
                    PathArg(&projects[0]),
                    PathArg(version)
                );
                self.put(&path, release)?.convert_rnf(ApiErrorKind::ReleaseNotFound)
            } else {
                let path = format!("/organizations/{}/releases/{}/",
                                   PathArg(org),
                                   PathArg(version));
                self.put(&path, release)?.convert_rnf(ApiErrorKind::ReleaseNotFound)
            }
        }
    }
    /// Sets release commits
    ///
    /// Sends a partial release update containing only the `refs` field.
    pub fn set_release_refs(
        &self,
        org: &str,
        version: &str,
        refs: Vec<Ref>,
    ) -> ApiResult<ReleaseInfo> {
        let update = UpdatedRelease {
            refs: Some(refs),
            ..Default::default()
        };
        let path = format!(
            "/organizations/{}/releases/{}/",
            PathArg(org),
            PathArg(version)
        );
        self.put(&path, &update)?
            .convert_rnf(ApiErrorKind::ReleaseNotFound)
    }
    /// Deletes an already existing release.  Returns `true` if it was deleted
    /// or `false` if not.  The project is needed to support the old deletion
    /// API.
    pub fn delete_release(
        &self,
        org: &str,
        project: Option<&str>,
        version: &str,
    ) -> ApiResult<bool> {
        let resp = if let Some(project) = project {
            self.delete(&format!(
                "/projects/{}/{}/releases/{}/",
                PathArg(org),
                PathArg(project),
                PathArg(version)
            ))?
        } else {
            self.delete(&format!(
                "/organizations/{}/releases/{}/",
                PathArg(org),
                PathArg(version)
            ))?
        };
        // A 404 means there was nothing to delete.
        if resp.status() == 404 {
            Ok(false)
        } else {
            resp.to_result().map(|_| true)
        }
    }
    /// Looks up a release and returns it.  If it does not exist `None`
    /// will be returned.
    pub fn get_release(
        &self,
        org: &str,
        project: Option<&str>,
        version: &str,
    ) -> ApiResult<Option<ReleaseInfo>> {
        let path = if let Some(project) = project {
            format!(
                "/projects/{}/{}/releases/{}/",
                PathArg(org),
                PathArg(project),
                PathArg(version)
            )
        } else {
            format!(
                "/organizations/{}/releases/{}/",
                PathArg(org),
                PathArg(version)
            )
        };
        let resp = self.get(&path)?;
        // Map a 404 to `None` instead of an error.
        if resp.status() == 404 {
            Ok(None)
        } else {
            resp.convert()
        }
    }
    /// Returns a list of releases for a given project.  This is currently a
    /// capped list by what the server deems an acceptable default limit.
    pub fn list_releases(&self, org: &str, project: Option<&str>) -> ApiResult<Vec<ReleaseInfo>> {
        if let Some(project) = project {
            let path = format!("/projects/{}/{}/releases/", PathArg(org), PathArg(project));
            self.get(&path)?.convert_rnf(ApiErrorKind::ProjectNotFound)
        } else {
            let path = format!("/organizations/{}/releases/", PathArg(org));
            self.get(&path)?
                .convert_rnf(ApiErrorKind::OrganizationNotFound)
        }
    }
    /// Creates a new deploy for a release.
    pub fn create_deploy(&self, org: &str, version: &str, deploy: &Deploy) -> ApiResult<Deploy> {
        let path = format!(
            "/organizations/{}/releases/{}/deploys/",
            PathArg(org),
            PathArg(version)
        );
        self.post(&path, deploy)?
            .convert_rnf(ApiErrorKind::ReleaseNotFound)
    }

    /// Lists all deploys for a release
    pub fn list_deploys(&self, org: &str, version: &str) -> ApiResult<Vec<Deploy>> {
        let path = format!(
            "/organizations/{}/releases/{}/deploys/",
            PathArg(org),
            PathArg(version)
        );
        self.get(&path)?.convert_rnf(ApiErrorKind::ReleaseNotFound)
    }
    /// Updates a bunch of issues within a project that match a provided filter
    /// and performs `changes` changes.
    ///
    /// Returns `Ok(false)` without any request when the filter matches
    /// nothing, `Ok(true)` when the update was submitted.
    pub fn bulk_update_issue(
        &self,
        org: &str,
        project: &str,
        filter: &IssueFilter,
        changes: &IssueChanges,
    ) -> ApiResult<bool> {
        let qs = match filter.get_query_string() {
            None => {
                return Ok(false);
            }
            Some(qs) => qs,
        };
        self.put(
            &format!(
                "/projects/{}/{}/issues/?{}",
                PathArg(org),
                PathArg(project),
                qs
            ),
            changes,
        )?
            .to_result()
            .map(|_| true)
    }
    /// Finds the latest release for sentry-cli on GitHub.
    ///
    /// Looks for the release asset matching this platform/arch; returns
    /// `Ok(None)` when no release or no matching asset is found.
    pub fn get_latest_sentrycli_release(&self) -> ApiResult<Option<SentryCliRelease>> {
        let resp = self.get("https://api.github.com/repos/getsentry/sentry-cli/releases/latest")?;
        let ref_name = format!("sentry-cli-{}-{}{}", capitalize_string(PLATFORM), ARCH, EXT);
        info!("Looking for file named: {}", ref_name);
        if resp.status() == 404 {
            Ok(None)
        } else {
            let info: GitHubRelease = resp.to_result()?.convert()?;
            for asset in info.assets {
                info!("Found asset {}", asset.name);
                if asset.name == ref_name {
                    return Ok(Some(SentryCliRelease {
                        version: info.tag_name,
                        download_url: asset.browser_download_url,
                    }));
                }
            }
            warn!("Unable to find release file");
            Ok(None)
        }
    }
    /// Given a list of checksums for DIFs, this returns a list of those
    /// that do not exist for the project yet.
    pub fn find_missing_dif_checksums<I>(
        &self,
        org: &str,
        project: &str,
        checksums: I,
    ) -> ApiResult<HashSet<Digest>>
    where
        I: IntoIterator<Item = Digest>,
    {
        let mut url = format!(
            "/projects/{}/{}/files/dsyms/unknown/?",
            PathArg(org),
            PathArg(project)
        );
        // Checksums are passed as repeated `checksums=` query parameters.
        for (idx, checksum) in checksums.into_iter().enumerate() {
            if idx > 0 {
                url.push('&');
            }
            url.push_str("checksums=");
            url.push_str(&checksum.to_string());
        }

        let state: MissingChecksumsResponse = self.get(&url)?.convert()?;
        Ok(state.missing)
    }
    /// Uploads a ZIP archive containing DIFs from the given path.
    pub fn upload_dif_archive(
        &self,
        org: &str,
        project: &str,
        file: &Path,
    ) -> ApiResult<Vec<DebugInfoFile>> {
        let path = format!(
            "/projects/{}/{}/files/dsyms/",
            PathArg(org),
            PathArg(project)
        );
        let mut form = curl::easy::Form::new();
        form.part("file").file(file).add()?;
        self.request(Method::Post, &path)?
            .with_form_data(form)?
            .progress_bar_mode(ProgressBarMode::Request)?
            .send()?
            .convert()
    }
/// Get the server configuration for chunked file uploads.
pub fn get_chunk_upload_options(&self, org: &str) -> ApiResult<Option<ChunkUploadOptions>> {
let url = format!("/organizations/{}/chunk-upload/", org);
match self.get(&url)?
.convert_rnf(ApiErrorKind::ChunkUploadNotSupported)
{
Ok(options) => Ok(Some(options)),
Err(error) => {
if error.kind() == ApiErrorKind::ChunkUploadNotSupported {
Ok(None)
} else {
Err(error)
}
}
}
}
/// Request DIF assembling and processing from chunks.
pub fn assemble_difs(
&self,
org: &str,
project: &str,
request: &AssembleDifsRequest,
) -> ApiResult<AssembleDifsResponse> {
let url = format!("/projects/{}/{}/files/difs/assemble/", org, project);
self.request(Method::Post, &url)?
.with_json_body(request)?
.send()?
.convert_rnf(ApiErrorKind::ProjectNotFound)
}
    /// Compresses a file with the given compression.
    ///
    /// `Uncompressed` simply copies the data.
    fn compress(data: &[u8], compression: ChunkCompression) -> Result<Vec<u8>, io::Error> {
        Ok(match compression {
            ChunkCompression::Brotli => {
                // Quality level 6 — mid-range tradeoff between speed and size.
                let mut encoder = BrotliEncoder::new(Vec::new(), 6);
                encoder.write_all(data)?;
                encoder.finish()?
            }

            ChunkCompression::Gzip => {
                let mut encoder = GzEncoder::new(Vec::new(), Default::default());
                encoder.write_all(data)?;
                encoder.finish()?
            }

            ChunkCompression::Uncompressed => data.into(),
        })
    }
    /// Upload a batch of file chunks.
    ///
    /// Each chunk is compressed with `compression` and sent as a multipart
    /// form part named after the compression's field name, keyed by the
    /// chunk's checksum.
    pub fn upload_chunks<'data, I, T>(
        &self,
        url: &str,
        chunks: I,
        progress_bar_mode: ProgressBarMode,
        compression: ChunkCompression,
    ) -> ApiResult<()>
    where
        I: IntoIterator<Item = &'data T>,
        T: AsRef<(Digest, &'data [u8])> + 'data,
    {
        // Curl stores a raw pointer to the stringified checksum internally. We first
        // transform all checksums to string and keep them in scope until the request
        // has completed. The original iterator is not needed anymore after this.
        let stringified_chunks: Vec<_> = chunks
            .into_iter()
            .map(|item| item.as_ref())
            .map(|&(checksum, data)| (checksum.to_string(), data))
            .collect();

        let mut form = curl::easy::Form::new();
        for (ref checksum, data) in stringified_chunks {
            let name = compression.field_name();
            let buffer = Api::compress(data, compression).context(ApiErrorKind::CompressionFailed)?;
            form.part(name).buffer(&checksum, buffer).add()?
        }

        let request = self.request(Method::Post, url)?
            .with_form_data(form)?
            .progress_bar_mode(progress_bar_mode)?;

        // The request is performed to an absolute URL. Thus, `Self::request()` will
        // not add the authorization header, by default. Since the URL is guaranteed
        // to be a Sentry-compatible endpoint, we force the Authorization header at
        // this point.
        let request = match Config::get_current().get_auth() {
            Some(auth) => request.with_auth(auth)?,
            None => request,
        };

        request.send()?.to_result()?;
        Ok(())
    }
    /// Associate apple debug symbols with a build
    ///
    /// Thin wrapper over `associate_dsyms` with the identifiers taken from
    /// the app's Info.plist.
    pub fn associate_apple_dsyms(
        &self,
        org: &str,
        project: &str,
        info_plist: &InfoPlist,
        checksums: Vec<String>,
    ) -> ApiResult<Option<AssociateDsymsResponse>> {
        self.associate_dsyms(
            org,
            project,
            &AssociateDsyms {
                platform: "apple".to_string(),
                checksums: checksums,
                name: info_plist.name().to_string(),
                app_id: info_plist.bundle_id().to_string(),
                version: info_plist.version().to_string(),
                build: Some(info_plist.build().to_string()),
            },
        )
    }

    /// Associate proguard mappings with an android app
    ///
    /// Thin wrapper over `associate_dsyms` with the identifiers taken from
    /// the AndroidManifest.
    pub fn associate_android_proguard_mappings(
        &self,
        org: &str,
        project: &str,
        manifest: &AndroidManifest,
        checksums: Vec<String>,
    ) -> ApiResult<Option<AssociateDsymsResponse>> {
        self.associate_dsyms(
            org,
            project,
            &AssociateDsyms {
                platform: "android".to_string(),
                checksums: checksums,
                name: manifest.name(),
                app_id: manifest.package().to_string(),
                version: manifest.version_name().to_string(),
                build: Some(manifest.version_code().to_string()),
            },
        )
    }
    /// Associate arbitrary debug symbols with a build
    ///
    /// Returns `Ok(None)` when the server does not support the endpoint
    /// (404).
    pub fn associate_dsyms(
        &self,
        org: &str,
        project: &str,
        data: &AssociateDsyms,
    ) -> ApiResult<Option<AssociateDsymsResponse>> {
        // in case we have no checksums to send up the server does not actually
        // let us associate anything. This generally makes sense but means that
        // from the client side we need to deal with this separately. In this
        // case we just pretend we did a request that did nothing.
        if data.checksums.is_empty() {
            return Ok(Some(AssociateDsymsResponse {
                associated_dsyms: vec![],
            }));
        }

        let path = format!(
            "/projects/{}/{}/files/dsyms/associate/",
            PathArg(org),
            PathArg(project)
        );
        let resp = self.request(Method::Post, &path)?
            .with_json_body(data)?
            .send()?;
        if resp.status() == 404 {
            Ok(None)
        } else {
            resp.convert()
        }
    }
    /// Triggers reprocessing for a project
    ///
    /// Returns `Ok(false)` when the server does not support it (404).
    pub fn trigger_reprocessing(&self, org: &str, project: &str) -> ApiResult<bool> {
        let path = format!(
            "/projects/{}/{}/reprocessing/",
            PathArg(org),
            PathArg(project)
        );
        let resp = self.request(Method::Post, &path)?.send()?;
        if resp.status() == 404 {
            Ok(false)
        } else {
            resp.to_result().map(|_| true)
        }
    }

    /// List all projects associated with an organization
    pub fn list_organization_projects(&self, org: &str) -> ApiResult<Vec<Project>> {
        self.get(&format!("/organizations/{}/projects/", PathArg(org)))?
            .convert_rnf(ApiErrorKind::OrganizationNotFound)
    }

    /// List all repos associated with an organization
    ///
    /// An unsupported endpoint (404) is reported as an empty list.
    pub fn list_organization_repos(&self, org: &str) -> ApiResult<Vec<Repo>> {
        let path = format!("/organizations/{}/repos/", PathArg(org));
        let resp = self.request(Method::Get, &path)?.send()?;
        if resp.status() == 404 {
            Ok(vec![])
        } else {
            Ok(resp.convert()?)
        }
    }
}
/// Performs the request on `handle`, streaming `body` (if any) as the upload
/// and writing the response into `out`.
///
/// Returns the HTTP status code and the raw response header lines.
fn send_req<W: Write>(
    handle: &mut curl::easy::Easy,
    out: &mut W,
    body: Option<Vec<u8>>,
    progress_bar_mode: ProgressBarMode,
) -> ApiResult<(u32, Vec<String>)> {
    match body {
        Some(body) => {
            let mut body = &body[..];
            handle.upload(true)?;
            handle.in_filesize(body.len() as u64)?;
            // Feed the body to curl's read callback slice by slice.
            handle_req(handle, out, progress_bar_mode, &mut |buf| {
                body.read(buf).unwrap_or(0)
            })
        }
        None => handle_req(handle, out, progress_bar_mode, &mut |_| 0),
    }
}
/// Wires up curl's transfer callbacks (read, write, header, debug, progress)
/// and performs the transfer.
///
/// `read` supplies the request body; response bytes go to `out` and header
/// lines are collected. Returns the status code and the header lines.
fn handle_req<W: Write>(
    handle: &mut curl::easy::Easy,
    out: &mut W,
    progress_bar_mode: ProgressBarMode,
    read: &mut FnMut(&mut [u8]) -> usize,
) -> ApiResult<(u32, Vec<String>)> {
    if progress_bar_mode.active() {
        handle.progress(true)?;
    }

    // enable verbose mode
    handle.verbose(true)?;

    let mut headers = Vec::new();
    // Lazily-created progress bar shared between the closures below.
    let pb: Rc<RefCell<Option<ProgressBar>>> = Rc::new(RefCell::new(None));
    {
        let mut headers = &mut headers;
        let mut handle = handle.transfer();

        if let ProgressBarMode::Shared((pb_progress, len, idx, counts)) = progress_bar_mode {
            // Shared mode: write this transfer's progress into its slot and
            // set the common bar to the sum of all slots.
            handle.progress_function(move |_, _, total, uploaded| {
                if uploaded > 0f64 && uploaded < total {
                    counts.write()[idx] = (uploaded / total * (len as f64)) as u64;
                    pb_progress.set_position(counts.read().iter().map(|&x| x).sum());
                }
                true
            })?;
        } else if progress_bar_mode.active() {
            let pb_progress = pb.clone();
            // Standalone mode: create the bar on first progress callback and
            // clear it when the direction finishes.
            handle.progress_function(move |a, b, c, d| {
                let (down_len, down_pos, up_len, up_pos) = (a as u64, b as u64, c as u64, d as u64);
                let mut pb = pb_progress.borrow_mut();
                if up_len > 0 && progress_bar_mode.request() {
                    if up_pos < up_len {
                        if pb.is_none() {
                            *pb = Some(make_byte_progress_bar(up_len));
                        }
                        pb.as_ref().unwrap().set_position(up_pos);
                    } else if pb.is_some() {
                        pb.take().unwrap().finish_and_clear();
                    }
                }
                if down_len > 0 && progress_bar_mode.response() {
                    if down_pos < down_len {
                        if pb.is_none() {
                            *pb = Some(make_byte_progress_bar(down_len));
                        }
                        pb.as_ref().unwrap().set_position(down_pos);
                    } else if pb.is_some() {
                        pb.take().unwrap().finish_and_clear();
                    }
                }
                true
            })?;
        }

        handle.read_function(move |buf| Ok(read(buf)))?;

        handle.write_function(move |data| {
            Ok(match out.write_all(data) {
                Ok(_) => data.len(),
                Err(_) => 0,
            })
        })?;

        handle.debug_function(move |info, data| match info {
            curl::easy::InfoType::HeaderIn => {
                log_headers(false, data);
            }
            curl::easy::InfoType::HeaderOut => {
                log_headers(true, data);
            }
            _ => {}
        })?;

        handle.header_function(move |data| {
            headers.push(String::from_utf8_lossy(data).into_owned());
            true
        })?;
        handle.perform()?;
    }

    if pb.borrow().is_some() {
        pb.borrow().as_ref().unwrap().finish_and_clear();
    }

    Ok((handle.response_code()?, headers))
}
/// Iterator over response headers
#[allow(dead_code)]
pub struct Headers<'a> {
    // Raw header lines as received (one "Name: value" string per entry).
    lines: &'a [String],
    // Index of the next line to yield.
    idx: usize,
}
impl<'a> Iterator for Headers<'a> {
    type Item = (&'a str, &'a str);

    /// Yields `(name, value)` pairs. The value has surrounding whitespace
    /// trimmed; a line without a colon is yielded as a trimmed name with
    /// an empty value.
    fn next(&mut self) -> Option<(&'a str, &'a str)> {
        let line = self.lines.get(self.idx)?;
        self.idx += 1;
        Some(match line.find(':') {
            Some(pos) => (&line[..pos], line[pos + 1..].trim()),
            None => (line.trim(), ""),
        })
    }
}
impl<'a> ApiRequest<'a> {
    /// Creates a new request for `method` on `url`, optionally applying
    /// authentication.
    ///
    /// Configures the borrowed curl handle: default headers, the HTTP
    /// verb and the target URL.
    fn new(
        mut handle: RefMut<'a, curl::easy::Easy>,
        method: Method,
        url: &str,
        auth: Option<&Auth>,
    ) -> ApiResult<ApiRequest<'a>> {
        info!("request {} {}", method, url);
        let mut headers = curl::easy::List::new();
        // An empty `Expect:` header suppresses curl's default
        // `Expect: 100-continue` handshake on uploads.
        headers.append("Expect:").ok();
        headers
            .append(&format!("User-Agent: sentry-cli/{}", VERSION))
            .ok();
        match method {
            Method::Get => handle.get(true)?,
            Method::Head => {
                // HEAD is modelled as a GET with a custom verb and the
                // response body explicitly disabled.
                handle.get(true)?;
                handle.custom_request("HEAD")?;
                handle.nobody(true)?;
            }
            Method::Post => handle.custom_request("POST")?,
            Method::Put => handle.custom_request("PUT")?,
            Method::Delete => handle.custom_request("DELETE")?,
        }
        handle.url(&url)?;
        let request = ApiRequest {
            handle: handle,
            headers: headers,
            body: None,
            progress_bar_mode: ProgressBarMode::Disabled,
        };
        let request = match auth {
            Some(auth) => ApiRequest::with_auth(request, auth)?,
            None => request,
        };
        Ok(request)
    }

    /// Explicitly overrides the Auth info.
    pub fn with_auth(mut self, auth: &Auth) -> ApiResult<Self> {
        match *auth {
            Auth::Key(ref key) => {
                // The API key is passed to curl as the HTTP username.
                self.handle.username(key)?;
                info!("using key based authentication");
            }
            Auth::Token(ref token) => {
                self.headers
                    .append(&format!("Authorization: Bearer {}", token))?;
                info!("using token authentication");
            }
        }
        Ok(self)
    }

    /// adds a specific header to the request
    pub fn with_header(mut self, key: &str, value: &str) -> ApiResult<ApiRequest<'a>> {
        self.headers.append(&format!("{}: {}", key, value))?;
        Ok(self)
    }

    /// sets the JSON request body for the request.
    ///
    /// Serializes `body` into an in-memory buffer and sets the matching
    /// `Content-Type` header.
    pub fn with_json_body<S: Serialize>(mut self, body: &S) -> ApiResult<ApiRequest<'a>> {
        let mut body_bytes: Vec<u8> = vec![];
        serde_json::to_writer(&mut body_bytes, &body).context(ApiErrorKind::CannotSerializeAsJson)?;
        info!("sending JSON data ({} bytes)", body_bytes.len());
        self.body = Some(body_bytes);
        self.headers.append("Content-Type: application/json")?;
        Ok(self)
    }

    /// attaches some form data to the request.
    ///
    /// curl's form API takes over body handling, so any previously set
    /// body is cleared.
    pub fn with_form_data(mut self, form: curl::easy::Form) -> ApiResult<ApiRequest<'a>> {
        info!("sending form data");
        self.handle.httppost(form)?;
        self.body = None;
        Ok(self)
    }

    /// enables or disables redirects. The default is off.
    pub fn follow_location(mut self, val: bool) -> ApiResult<ApiRequest<'a>> {
        info!("follow redirects: {}", val);
        self.handle.follow_location(val)?;
        Ok(self)
    }

    /// enables a progress bar.
    pub fn progress_bar_mode(mut self, mode: ProgressBarMode) -> ApiResult<ApiRequest<'a>> {
        self.progress_bar_mode = mode;
        Ok(self)
    }

    /// Sends the request and writes response data into the given file
    /// instead of the response object's in memory buffer.
    ///
    /// The returned `ApiResponse` carries status and headers but no body.
    pub fn send_into<W: Write>(mut self, out: &mut W) -> ApiResult<ApiResponse> {
        self.handle.http_headers(self.headers)?;
        let (status, headers) = send_req(&mut self.handle, out, self.body, self.progress_bar_mode)?;
        info!("response: {}", status);
        Ok(ApiResponse {
            status: status,
            headers: headers,
            body: None,
        })
    }

    /// Sends the request and reads the response body into the response object.
    pub fn send(self) -> ApiResult<ApiResponse> {
        let mut out = vec![];
        let mut rv = self.send_into(&mut out)?;
        rv.body = Some(out);
        Ok(rv)
    }
}
impl ApiResponse {
    /// Returns the status code of the response
    pub fn status(&self) -> u32 {
        self.status
    }

    /// Indicates that the request failed
    // NOTE(review): the upper bound is inclusive of 600, which is not a
    // valid HTTP status; in practice this matches all 4xx/5xx responses.
    pub fn failed(&self) -> bool {
        self.status >= 400 && self.status <= 600
    }

    /// Indicates that the request succeeded
    pub fn ok(&self) -> bool {
        !self.failed()
    }

    /// Converts the API response into a result object. This also converts
    /// non okay response codes into errors.
    ///
    /// Error detail is extracted with decreasing specificity: a structured
    /// `ErrorInfo` payload, then any JSON value, then no detail at all.
    pub fn to_result(self) -> ApiResult<ApiResponse> {
        if let Some(ref body) = self.body {
            info!("body: {}", String::from_utf8_lossy(body));
        }
        if self.ok() {
            return Ok(self);
        }
        if let Ok(err) = self.deserialize::<ErrorInfo>() {
            Err(SentryError {
                status: self.status(),
                detail: Some(match err {
                    ErrorInfo::Detail(val) => val,
                    ErrorInfo::Error(val) => val,
                }),
                extra: None,
            }.context(ApiErrorKind::RequestFailed)
            .into())
        } else if let Ok(value) = self.deserialize::<serde_json::Value>() {
            Err(SentryError {
                status: self.status(),
                detail: Some("request failure".into()),
                extra: Some(value),
            }.context(ApiErrorKind::RequestFailed)
            .into())
        } else {
            Err(SentryError {
                status: self.status(),
                detail: None,
                extra: None,
            }.context(ApiErrorKind::RequestFailed)
            .into())
        }
    }

    /// Deserializes the response body into the given type
    ///
    /// Fails with `NotJson` if the content type is not JSON and with
    /// `BadJson` if parsing fails. An absent body is treated as empty.
    pub fn deserialize<T: DeserializeOwned>(&self) -> ApiResult<T> {
        if !self.is_json() {
            return Err(ApiErrorKind::NotJson.into());
        }
        Ok(serde_json::from_reader(match self.body {
            Some(ref body) => body,
            None => &b""[..],
        }).context(ApiErrorKind::BadJson)?)
    }

    /// Like `deserialize` but consumes the response and will convert
    /// failed requests into proper errors.
    pub fn convert<T: DeserializeOwned>(self) -> ApiResult<T> {
        self.to_result().and_then(|x| x.deserialize())
    }

    /// Like convert but produces resource not found errors.
    ///
    /// For project lookups a 301/302 is interpreted as "project renamed":
    /// the redirect payload carries the new slug, which is surfaced via
    /// `ProjectRenamedError`.
    pub fn convert_rnf<T: DeserializeOwned>(self, res_err: ApiErrorKind) -> ApiResult<T> {
        match self.status() {
            301 | 302 if res_err == ApiErrorKind::ProjectNotFound => {
                #[derive(Deserialize, Debug)]
                struct ErrorDetail {
                    slug: String,
                }
                #[derive(Deserialize, Debug)]
                struct ErrorInfo {
                    detail: ErrorDetail,
                }
                match self.convert::<ErrorInfo>() {
                    Ok(info) => Err(ProjectRenamedError(info.detail.slug)
                        .context(res_err)
                        .into()),
                    Err(_) => Err(res_err.into()),
                }
            }
            404 => Err(res_err.into()),
            _ => self.to_result().and_then(|x| x.deserialize()),
        }
    }

    /// Iterates over the headers.
    #[allow(dead_code)]
    pub fn headers(&self) -> Headers {
        Headers {
            lines: &self.headers[..],
            idx: 0,
        }
    }

    /// Looks up the first matching header for a key.
    ///
    /// Header names are compared ASCII case-insensitively.
    #[allow(dead_code)]
    pub fn get_header(&self, key: &str) -> Option<&str> {
        for (header_key, header_value) in self.headers() {
            if header_key.eq_ignore_ascii_case(key) {
                return Some(header_value);
            }
        }
        None
    }

    /// Returns true if the response is JSON.
    ///
    /// Only the media type before any `;` parameter (e.g. charset) is
    /// compared against `application/json`.
    pub fn is_json(&self) -> bool {
        self.get_header("content-type")
            .and_then(|x| x.split(';').next())
            .unwrap_or("") == "application/json"
    }
}
/// Logs request/response header lines at info level, masking credentials.
///
/// Lines are prefixed with ">" when `is_response` is true and "<"
/// otherwise. `Authorization` headers are sanitized before logging: for
/// the `Basic` scheme only the part of the credentials before the first
/// `:` is kept (assumes `user:pass`-shaped credentials — TODO confirm
/// whether curl emits these base64-encoded here); for any other scheme
/// the token is truncated to at most 8 characters followed by `***`.
/// Non-UTF-8 header data is silently skipped.
fn log_headers(is_response: bool, data: &[u8]) {
    lazy_static! {
        static ref AUTH_RE: Regex = Regex::new(r"(?i)(authorization):\s*([\w]+)\s+(.*)").unwrap();
    }
    if let Ok(header) = str::from_utf8(data) {
        for line in header.lines() {
            if line.is_empty() {
                continue;
            }
            let replaced = AUTH_RE.replace_all(line, |caps: &Captures| {
                // BUGFIX: the scheme is capture group 2; group 1 is the
                // literal header name "authorization", so comparing group
                // 1 made the Basic branch unreachable.
                let info = if &caps[2].to_lowercase() == "basic" {
                    caps[3].split(':').next().unwrap().to_string()
                } else {
                    format!("{}***", &caps[3][..cmp::min(caps[3].len(), 8)])
                };
                format!("{}: {} {}", &caps[1], &caps[2], info)
            });
            info!("{} {}", if is_response { ">" } else { "<" }, replaced);
        }
    }
}
/// Error payload shapes returned by the server: either a `{"detail": ...}`
/// or an `{"error": ...}` object (field names lowercased via serde).
#[derive(Debug, Deserialize)]
#[serde(rename_all = "lowercase")]
enum ErrorInfo {
    Detail(String),
    Error(String),
}
/// Provides the auth details (access scopes)
#[derive(Deserialize, Debug)]
pub struct AuthDetails {
    // Scope names granted to the current credentials.
    pub scopes: Vec<String>,
}
/// Indicates which user signed in
#[derive(Deserialize, Debug)]
pub struct User {
    pub email: String,
    pub id: String,
}
/// Provides the authentication information
///
/// Both fields are optional; either may be absent depending on how the
/// request was authenticated.
#[derive(Deserialize, Debug)]
pub struct AuthInfo {
    pub auth: Option<AuthDetails>,
    pub user: Option<User>,
}
/// A release artifact
#[derive(Deserialize, Debug)]
pub struct Artifact {
    pub id: String,
    // SHA-1 checksum of the artifact.
    pub sha1: String,
    pub name: String,
    pub size: u64,
    // Optional distribution identifier the artifact is bound to.
    pub dist: Option<String>,
    // HTTP-style headers stored with the artifact (e.g. sourcemap refs).
    pub headers: HashMap<String, String>,
}
impl Artifact {
    /// Returns the value of the artifact header `key`, compared
    /// case-insensitively, or `None` if no such header exists.
    pub fn get_header<'a, 'b>(&'a self, key: &'b str) -> Option<&'a str> {
        let wanted = key.to_lowercase();
        self.headers
            .iter()
            .find(|&(name, _)| name.to_lowercase() == wanted)
            .map(|(_, value)| value.as_str())
    }

    /// Returns the sourcemap reference stored in this artifact's
    /// headers, if any.
    pub fn get_sourcemap_reference(&self) -> Option<&str> {
        get_sourcemap_reference_from_headers(self.headers.iter())
    }
}
/// Information for new releases
///
/// Optional fields are omitted from the JSON payload when unset.
#[derive(Debug, Serialize, Default)]
pub struct NewRelease {
    pub version: String,
    // Project slugs this release is created for.
    pub projects: Vec<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub url: Option<String>,
    #[serde(rename = "dateStarted", skip_serializing_if = "Option::is_none")]
    pub date_started: Option<DateTime<Utc>>,
    #[serde(rename = "dateReleased", skip_serializing_if = "Option::is_none")]
    pub date_released: Option<DateTime<Utc>>,
}
/// A head commit on a release
///
/// Serialized with the server's field names: `repository`, `commit`
/// and `previousCommit`.
#[derive(Debug, Serialize, Default)]
pub struct Ref {
    #[serde(rename = "repository")]
    pub repo: String,
    #[serde(rename = "commit")]
    pub rev: String,
    #[serde(rename = "previousCommit")]
    pub prev_rev: Option<String>,
}
/// Changes to a release
///
/// All fields are optional; only the ones that are set get serialized,
/// making this suitable for partial updates.
#[derive(Debug, Serialize, Default)]
pub struct UpdatedRelease {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub projects: Option<Vec<String>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub url: Option<String>,
    #[serde(rename = "dateStarted", skip_serializing_if = "Option::is_none")]
    pub date_started: Option<DateTime<Utc>>,
    #[serde(rename = "dateReleased", skip_serializing_if = "Option::is_none")]
    pub date_released: Option<DateTime<Utc>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub refs: Option<Vec<Ref>>,
}
/// Provides all release information from already existing releases
#[derive(Debug, Deserialize)]
pub struct ReleaseInfo {
    pub version: String,
    pub url: Option<String>,
    #[serde(rename = "dateCreated")]
    pub date_created: DateTime<Utc>,
    #[serde(rename = "dateReleased")]
    pub date_released: Option<DateTime<Utc>>,
    // Timestamp of the most recent event seen for this release, if any.
    #[serde(rename = "lastEvent")]
    pub last_event: Option<DateTime<Utc>>,
    // Number of new issue groups attributed to this release.
    #[serde(rename = "newGroups")]
    pub new_groups: u64,
}
/// A downloadable asset attached to a GitHub release.
#[derive(Debug, Serialize, Deserialize)]
struct GitHubAsset {
    browser_download_url: String,
    name: String,
}
/// A GitHub release: its tag plus attached assets.
#[derive(Debug, Serialize, Deserialize)]
struct GitHubRelease {
    tag_name: String,
    assets: Vec<GitHubAsset>,
}
/// Information about sentry CLI releases
pub struct SentryCliRelease {
    pub version: String,
    // URL the release binary can be downloaded from.
    pub download_url: String,
}
/// Debug information files as processed and stored on the server.
/// Can be dSYMs, ELF debug infos, Breakpad symbols, etc...
#[derive(Debug, Deserialize)]
pub struct DebugInfoFile {
    // Legacy identifier field; superseded by `debugId` (see `id()`).
    #[serde(rename = "uuid")]
    uuid: Option<DebugId>,
    #[serde(rename = "debugId")]
    id: Option<DebugId>,
    #[serde(rename = "objectName")]
    pub object_name: String,
    #[serde(rename = "cpuName")]
    pub cpu_name: String,
    // SHA-1 checksum of the file.
    #[serde(rename = "sha1")]
    pub checksum: String,
}
impl DebugInfoFile {
    /// Returns the debug identifier of this file, preferring the modern
    /// `debugId` field, then the legacy `uuid`, and finally the default
    /// (nil) identifier when neither is present.
    pub fn id(&self) -> DebugId {
        match self.id {
            Some(id) => id,
            None => self.uuid.unwrap_or_default(),
        }
    }
}
/// Request payload for associating uploaded dSYMs (referenced by
/// checksum) with an application release.
#[derive(Debug, Serialize)]
pub struct AssociateDsyms {
    pub platform: String,
    // Checksums of the previously uploaded debug files to associate.
    pub checksums: Vec<String>,
    pub name: String,
    #[serde(rename = "appId")]
    pub app_id: String,
    pub version: String,
    pub build: Option<String>,
}
/// Server response listing which uploaded-file checksums it does not
/// have yet.
#[derive(Deserialize)]
struct MissingChecksumsResponse {
    missing: HashSet<Digest>,
}
/// Change information for issue bulk updates.
#[derive(Serialize, Default)]
pub struct IssueChanges {
    #[serde(rename = "status")]
    pub new_status: Option<String>,
    #[serde(rename = "snoozeDuration")]
    pub snooze_duration: Option<i64>,
}
/// Filters for issue bulk requests.
pub enum IssueFilter {
    /// Match no issues
    Empty,
    /// Match on all issues
    All,
    /// Match on the issues with the given IDs
    ExplicitIds(Vec<u64>),
    /// Match on issues with the given status
    Status(String),
}
impl IssueFilter {
    /// Renders the filter as an URL query string.
    ///
    /// `None` means the request should not be made at all: the filter is
    /// `Empty` or an explicit ID list with no IDs. `All` yields an empty
    /// string (no query constraints).
    fn get_query_string(&self) -> Option<String> {
        let params: Vec<String> = match *self {
            IssueFilter::Empty => return None,
            IssueFilter::All => Vec::new(),
            IssueFilter::ExplicitIds(ref ids) => {
                if ids.is_empty() {
                    return None;
                }
                ids.iter().map(|id| format!("id={}", id)).collect()
            }
            IssueFilter::Status(ref status) => vec![format!("status={}", status)],
        };
        Some(params.join("&"))
    }
}
/// Server response to a dSYM association request.
#[derive(Deserialize)]
pub struct AssociateDsymsResponse {
    #[serde(rename = "associatedDsymFiles")]
    pub associated_dsyms: Vec<DebugInfoFile>,
}
/// A team within an organization.
#[derive(Deserialize, Debug)]
pub struct Team {
    pub id: String,
    pub slug: String,
    pub name: String,
}
/// A project and the team it belongs to.
#[derive(Deserialize, Debug)]
pub struct Project {
    pub id: String,
    pub slug: String,
    pub name: String,
    pub team: Team,
}
/// The provider (e.g. integration type) backing a repository.
#[derive(Deserialize, Debug)]
pub struct RepoProvider {
    pub id: String,
    pub name: String,
}
/// A repository linked to an organization.
#[derive(Deserialize, Debug)]
pub struct Repo {
    pub id: String,
    pub name: String,
    pub url: Option<String>,
    pub provider: RepoProvider,
    pub status: String,
    #[serde(rename = "dateCreated")]
    pub date_created: DateTime<Utc>,
}
/// A deploy of a release into an environment.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct Deploy {
    #[serde(rename = "environment")]
    pub env: String,
    pub name: Option<String>,
    pub url: Option<String>,
    #[serde(rename = "dateStarted")]
    pub started: Option<DateTime<Utc>>,
    #[serde(rename = "dateFinished")]
    pub finished: Option<DateTime<Utc>>,
}
/// Hash algorithm the server expects for chunk digests (only SHA-1 is
/// currently supported; serialized as "sha1").
#[derive(Debug, Deserialize, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)]
pub enum ChunkHashAlgorithm {
    #[serde(rename = "sha1")]
    Sha1,
}
/// Compression applied to uploaded chunks. The discriminant values
/// double as an ordering (higher = preferred).
#[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq)]
pub enum ChunkCompression {
    /// No compression should be applied
    Uncompressed = 0,
    /// GZIP compression (including header)
    Gzip = 10,
    /// Brotli compression
    Brotli = 20,
}
impl ChunkCompression {
    /// Multipart form field name the server expects for a chunk uploaded
    /// with this compression.
    fn field_name(&self) -> &'static str {
        match self {
            ChunkCompression::Uncompressed => "file",
            ChunkCompression::Gzip => "file_gzip",
            ChunkCompression::Brotli => "file_brotli",
        }
    }
}
impl Default for ChunkCompression {
    // Conservative default: never compress unless the server advertises
    // support for a compression scheme.
    fn default() -> Self {
        ChunkCompression::Uncompressed
    }
}
impl fmt::Display for ChunkCompression {
    /// Human-readable lowercase name of the compression scheme.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let name = match *self {
            ChunkCompression::Uncompressed => "uncompressed",
            ChunkCompression::Gzip => "gzip",
            ChunkCompression::Brotli => "brotli",
        };
        write!(f, "{}", name)
    }
}
impl<'de> Deserialize<'de> for ChunkCompression {
    // Custom impl: unknown scheme names must not fail deserialization,
    // they silently fall back to `Uncompressed`.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(match String::deserialize(deserializer)?.as_str() {
            "gzip" => ChunkCompression::Gzip,
            "brotli" => ChunkCompression::Brotli,
            // We do not know this compression, so we assume no compression
            _ => ChunkCompression::Uncompressed,
        })
    }
}
/// Server-advertised parameters for the chunked upload endpoint.
#[derive(Debug, Deserialize)]
pub struct ChunkUploadOptions {
    #[serde(rename = "url")]
    pub url: String,
    // Maximum number of chunks allowed per request.
    #[serde(rename = "chunksPerRequest")]
    pub max_chunks: u64,
    // Maximum total request size in bytes.
    #[serde(rename = "maxRequestSize")]
    pub max_size: u64,
    #[serde(rename = "hashAlgorithm")]
    pub hash_algorithm: ChunkHashAlgorithm,
    #[serde(rename = "chunkSize")]
    pub chunk_size: u64,
    // Number of requests the client may issue in parallel.
    #[serde(rename = "concurrency")]
    pub concurrency: u8,
    // Compression schemes the server accepts; empty when not advertised.
    #[serde(rename = "compression", default)]
    pub compression: Vec<ChunkCompression>,
}
/// Server-side assembly state of a chunked file upload.
#[derive(Debug, Deserialize, Eq, PartialEq, Hash, Ord, PartialOrd)]
pub enum ChunkedFileState {
    #[serde(rename = "error")]
    Error,
    #[serde(rename = "not_found")]
    NotFound,
    #[serde(rename = "created")]
    Created,
    #[serde(rename = "assembling")]
    Assembling,
    #[serde(rename = "ok")]
    Ok,
}
impl ChunkedFileState {
pub fn finished(&self) -> bool {
*self == ChunkedFileState::Error || *self == ChunkedFileState::Ok
}
pub fn pending(&self) -> bool {
!self.finished()
}
pub fn ok(&self) -> bool {
*self == ChunkedFileState::Ok
}
}
/// Per-file entry of an assemble request: the file name and the digests
/// of the chunks it is made of.
#[derive(Debug, Serialize)]
pub struct ChunkedDifRequest<'a> {
    #[serde(rename = "name")]
    pub name: &'a str,
    #[serde(rename = "chunks")]
    pub chunks: &'a [Digest],
}
/// Per-file entry of an assemble response.
#[derive(Debug, Deserialize)]
pub struct ChunkedDifResponse {
    #[serde(rename = "state")]
    pub state: ChunkedFileState,
    // Chunks the server still needs before it can assemble the file.
    #[serde(rename = "missingChunks")]
    pub missing_chunks: Vec<Digest>,
    // Optional human-readable detail (e.g. an assembly error message).
    #[serde(default, rename = "detail")]
    pub detail: Option<String>,
    // The resulting debug info file once assembly succeeded.
    #[serde(default, rename = "dif")]
    pub dif: Option<DebugInfoFile>,
}
/// Assemble request batch, keyed by file digest (presumably the file's
/// total checksum — confirm against the server API).
pub type AssembleDifsRequest<'a> = HashMap<Digest, ChunkedDifRequest<'a>>;
/// Assemble response batch, keyed the same way as the request.
pub type AssembleDifsResponse = HashMap<Digest, ChunkedDifResponse>;
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
/// Request body for triggering a backup; optionally names the Azure file share.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BackupRequest {
    #[serde(rename = "azureFileShare", default, skip_serializing_if = "Option::is_none")]
    pub azure_file_share: Option<String>,
}
/// Parameters for checking availability of a storage sync service name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CheckNameAvailabilityParameters {
    pub name: String,
    #[serde(rename = "type")]
    pub type_: check_name_availability_parameters::Type,
}
pub mod check_name_availability_parameters {
    use super::*;
    /// Resource type accepted by the name-availability check (only
    /// storage sync services).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        #[serde(rename = "Microsoft.StorageSync/storageSyncServices")]
        MicrosoftStorageSyncStorageSyncServices,
    }
}
/// Result of a name-availability check, with an optional reason/message
/// when the name is unavailable.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CheckNameAvailabilityResult {
    #[serde(rename = "nameAvailable", default, skip_serializing_if = "Option::is_none")]
    pub name_available: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reason: Option<check_name_availability_result::Reason>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
pub mod check_name_availability_result {
    use super::*;
    /// Why a requested name is unavailable.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Reason {
        Invalid,
        AlreadyExists,
    }
}
/// Cloud endpoint resource: proxy-resource envelope plus optional properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudEndpoint {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<CloudEndpointProperties>,
}
/// List wrapper for cloud endpoints.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudEndpointArray {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<CloudEndpoint>,
}
/// Body for creating a cloud endpoint.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudEndpointCreateParameters {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<CloudEndpointCreateParametersProperties>,
}
/// Properties accepted when creating a cloud endpoint.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudEndpointCreateParametersProperties {
    #[serde(rename = "storageAccountResourceId", default, skip_serializing_if = "Option::is_none")]
    pub storage_account_resource_id: Option<String>,
    #[serde(rename = "azureFileShareName", default, skip_serializing_if = "Option::is_none")]
    pub azure_file_share_name: Option<String>,
    #[serde(rename = "storageAccountTenantId", default, skip_serializing_if = "Option::is_none")]
    pub storage_account_tenant_id: Option<String>,
    #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")]
    pub friendly_name: Option<String>,
}
/// Full server-side properties of a cloud endpoint, including
/// provisioning/workflow bookkeeping fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudEndpointProperties {
    #[serde(rename = "storageAccountResourceId", default, skip_serializing_if = "Option::is_none")]
    pub storage_account_resource_id: Option<String>,
    #[serde(rename = "azureFileShareName", default, skip_serializing_if = "Option::is_none")]
    pub azure_file_share_name: Option<String>,
    #[serde(rename = "storageAccountTenantId", default, skip_serializing_if = "Option::is_none")]
    pub storage_account_tenant_id: Option<String>,
    #[serde(rename = "partnershipId", default, skip_serializing_if = "Option::is_none")]
    pub partnership_id: Option<String>,
    #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")]
    pub friendly_name: Option<String>,
    // Serialized as a string by the service, not a boolean.
    #[serde(rename = "backupEnabled", default, skip_serializing_if = "Option::is_none")]
    pub backup_enabled: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
    #[serde(rename = "lastWorkflowId", default, skip_serializing_if = "Option::is_none")]
    pub last_workflow_id: Option<String>,
    #[serde(rename = "lastOperationName", default, skip_serializing_if = "Option::is_none")]
    pub last_operation_name: Option<String>,
}
/// Cloud tiering cache hit/miss statistics.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudTieringCachePerformance {
    #[serde(rename = "lastUpdatedTimestamp", default, skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
    #[serde(rename = "cacheHitBytes", default, skip_serializing_if = "Option::is_none")]
    pub cache_hit_bytes: Option<i64>,
    #[serde(rename = "cacheMissBytes", default, skip_serializing_if = "Option::is_none")]
    pub cache_miss_bytes: Option<i64>,
    #[serde(rename = "cacheHitBytesPercent", default, skip_serializing_if = "Option::is_none")]
    pub cache_hit_bytes_percent: Option<i32>,
}
/// Status of the date-based cloud tiering policy.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudTieringDatePolicyStatus {
    #[serde(rename = "lastUpdatedTimestamp", default, skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
    #[serde(rename = "tieredFilesMostRecentAccessTimestamp", default, skip_serializing_if = "Option::is_none")]
    pub tiered_files_most_recent_access_timestamp: Option<String>,
}
/// Summary of files that failed to tier, with per-error-code counts.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudTieringFilesNotTiering {
    #[serde(rename = "lastUpdatedTimestamp", default, skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
    #[serde(rename = "totalFileCount", default, skip_serializing_if = "Option::is_none")]
    pub total_file_count: Option<i64>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub errors: Vec<FilesNotTieringError>,
}
/// Space savings achieved by cloud tiering on a volume.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudTieringSpaceSavings {
    #[serde(rename = "lastUpdatedTimestamp", default, skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
    #[serde(rename = "volumeSizeBytes", default, skip_serializing_if = "Option::is_none")]
    pub volume_size_bytes: Option<i64>,
    #[serde(rename = "totalSizeCloudBytes", default, skip_serializing_if = "Option::is_none")]
    pub total_size_cloud_bytes: Option<i64>,
    #[serde(rename = "cachedSizeBytes", default, skip_serializing_if = "Option::is_none")]
    pub cached_size_bytes: Option<i64>,
    #[serde(rename = "spaceSavingsPercent", default, skip_serializing_if = "Option::is_none")]
    pub space_savings_percent: Option<i32>,
    #[serde(rename = "spaceSavingsBytes", default, skip_serializing_if = "Option::is_none")]
    pub space_savings_bytes: Option<i64>,
}
/// Status of the volume-free-space cloud tiering policy.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudTieringVolumeFreeSpacePolicyStatus {
    #[serde(rename = "lastUpdatedTimestamp", default, skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
    #[serde(rename = "effectiveVolumeFreeSpacePolicy", default, skip_serializing_if = "Option::is_none")]
    pub effective_volume_free_space_policy: Option<i32>,
    #[serde(rename = "currentVolumeFreeSpacePercent", default, skip_serializing_if = "Option::is_none")]
    pub current_volume_free_space_percent: Option<i32>,
}
/// On/off feature switch, serialized as lowercase "on"/"off".
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum FeatureStatus {
    #[serde(rename = "on")]
    On,
    #[serde(rename = "off")]
    Off,
}
/// One tiering error bucket: an error code and how many files hit it.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FilesNotTieringError {
    #[serde(rename = "errorCode", default, skip_serializing_if = "Option::is_none")]
    pub error_code: Option<i32>,
    #[serde(rename = "fileCount", default, skip_serializing_if = "Option::is_none")]
    pub file_count: Option<i64>,
}
/// Direction of a workflow operation (do/undo/cancel), lowercase on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OperationDirection {
    #[serde(rename = "do")]
    Do,
    #[serde(rename = "undo")]
    Undo,
    #[serde(rename = "cancel")]
    Cancel,
}
/// Human-readable display metadata of an API operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationDisplayInfo {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub operation: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub provider: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub resource: Option<String>,
}
/// Display metadata variant with the same fields as `OperationDisplayInfo`
/// in provider/resource/operation/description order.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationDisplayResource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub provider: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub resource: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub operation: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
}
/// One entry of the provider's operations listing.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationEntity {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub display: Option<OperationDisplayInfo>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub origin: Option<String>,
}
/// Paged list of operations; `next_link` points at the following page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationEntityListResult {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<OperationEntity>,
}
/// Status of a long-running operation, including timing and error info.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationStatus {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
    pub start_time: Option<String>,
    #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
    pub end_time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<StorageSyncApiError>,
}
/// A server-local (physical) file system path.
pub type PhysicalPath = String;
/// Response of the post-backup call, wrapping backup metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PostBackupResponse {
    #[serde(rename = "backupMetadata", default, skip_serializing_if = "Option::is_none")]
    pub backup_metadata: Option<PostBackupResponseProperties>,
}
/// Backup metadata: the cloud endpoint that was backed up.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PostBackupResponseProperties {
    #[serde(rename = "cloudEndpointName", default, skip_serializing_if = "Option::is_none")]
    pub cloud_endpoint_name: Option<String>,
}
/// Body of a post-restore call, describing the completed restore session.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PostRestoreRequest {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub partition: Option<String>,
    #[serde(rename = "replicaGroup", default, skip_serializing_if = "Option::is_none")]
    pub replica_group: Option<String>,
    #[serde(rename = "requestId", default, skip_serializing_if = "Option::is_none")]
    pub request_id: Option<String>,
    #[serde(rename = "azureFileShareUri", default, skip_serializing_if = "Option::is_none")]
    pub azure_file_share_uri: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    #[serde(rename = "sourceAzureFileShareUri", default, skip_serializing_if = "Option::is_none")]
    pub source_azure_file_share_uri: Option<String>,
    #[serde(rename = "failedFileList", default, skip_serializing_if = "Option::is_none")]
    pub failed_file_list: Option<String>,
    #[serde(rename = "restoreFileSpec", default, skip_serializing_if = "Vec::is_empty")]
    pub restore_file_spec: Vec<RestoreFileSpec>,
}
/// Body of a pre-restore call; like `PostRestoreRequest` but with backup
/// metadata and a sync-drain pause duration instead of a failure list.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PreRestoreRequest {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub partition: Option<String>,
    #[serde(rename = "replicaGroup", default, skip_serializing_if = "Option::is_none")]
    pub replica_group: Option<String>,
    #[serde(rename = "requestId", default, skip_serializing_if = "Option::is_none")]
    pub request_id: Option<String>,
    #[serde(rename = "azureFileShareUri", default, skip_serializing_if = "Option::is_none")]
    pub azure_file_share_uri: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    #[serde(rename = "sourceAzureFileShareUri", default, skip_serializing_if = "Option::is_none")]
    pub source_azure_file_share_uri: Option<String>,
    #[serde(rename = "backupMetadataPropertyBag", default, skip_serializing_if = "Option::is_none")]
    pub backup_metadata_property_bag: Option<String>,
    #[serde(rename = "restoreFileSpec", default, skip_serializing_if = "Vec::is_empty")]
    pub restore_file_spec: Vec<RestoreFileSpec>,
    #[serde(
        rename = "pauseWaitForSyncDrainTimePeriodInSeconds",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    pub pause_wait_for_sync_drain_time_period_in_seconds: Option<i64>,
}
/// Kind of progress being reported, lowercase on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ProgressType {
    #[serde(rename = "none")]
    None,
    #[serde(rename = "initialize")]
    Initialize,
    #[serde(rename = "download")]
    Download,
    #[serde(rename = "upload")]
    Upload,
    #[serde(rename = "recall")]
    Recall,
}
/// ARM proxy resource: the common resource envelope without a location.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProxyResource {
    #[serde(flatten)]
    pub resource: Resource,
}
/// Parameters for a recall action: a file name pattern and/or path.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecallActionParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub pattern: Option<String>,
    #[serde(rename = "recallPath", default, skip_serializing_if = "Option::is_none")]
    pub recall_path: Option<String>,
}
/// Registered server resource: proxy-resource envelope plus optional properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RegisteredServer {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<RegisteredServerProperties>,
}
/// List wrapper for registered servers.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RegisteredServerArray {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<RegisteredServer>,
}
/// Body for registering a server.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RegisteredServerCreateParameters {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<RegisteredServerCreateParametersProperties>,
}
/// Properties accepted when registering a server.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RegisteredServerCreateParametersProperties {
    #[serde(rename = "serverCertificate", default, skip_serializing_if = "Option::is_none")]
    pub server_certificate: Option<String>,
    #[serde(rename = "agentVersion", default, skip_serializing_if = "Option::is_none")]
    pub agent_version: Option<String>,
    #[serde(rename = "serverOSVersion", default, skip_serializing_if = "Option::is_none")]
    pub server_os_version: Option<String>,
    #[serde(rename = "lastHeartBeat", default, skip_serializing_if = "Option::is_none")]
    pub last_heart_beat: Option<String>,
    #[serde(rename = "serverRole", default, skip_serializing_if = "Option::is_none")]
    pub server_role: Option<String>,
    #[serde(rename = "clusterId", default, skip_serializing_if = "Option::is_none")]
    pub cluster_id: Option<String>,
    #[serde(rename = "clusterName", default, skip_serializing_if = "Option::is_none")]
    pub cluster_name: Option<String>,
    #[serde(rename = "serverId", default, skip_serializing_if = "Option::is_none")]
    pub server_id: Option<String>,
    #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")]
    pub friendly_name: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RegisteredServerProperties {
#[serde(rename = "serverCertificate", default, skip_serializing_if = "Option::is_none")]
pub server_certificate: Option<String>,
#[serde(rename = "agentVersion", default, skip_serializing_if = "Option::is_none")]
pub agent_version: Option<String>,
#[serde(rename = "serverOSVersion", default, skip_serializing_if = "Option::is_none")]
pub server_os_version: Option<String>,
#[serde(rename = "serverManagementErrorCode", default, skip_serializing_if = "Option::is_none")]
pub server_management_error_code: Option<i64>,
#[serde(rename = "lastHeartBeat", default, skip_serializing_if = "Option::is_none")]
pub last_heart_beat: Option<String>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "serverRole", default, skip_serializing_if = "Option::is_none")]
pub server_role: Option<String>,
#[serde(rename = "clusterId", default, skip_serializing_if = "Option::is_none")]
pub cluster_id: Option<String>,
#[serde(rename = "clusterName", default, skip_serializing_if = "Option::is_none")]
pub cluster_name: Option<String>,
#[serde(rename = "serverId", default, skip_serializing_if = "Option::is_none")]
pub server_id: Option<String>,
#[serde(rename = "storageSyncServiceUid", default, skip_serializing_if = "Option::is_none")]
pub storage_sync_service_uid: Option<String>,
#[serde(rename = "lastWorkflowId", default, skip_serializing_if = "Option::is_none")]
pub last_workflow_id: Option<String>,
#[serde(rename = "lastOperationName", default, skip_serializing_if = "Option::is_none")]
pub last_operation_name: Option<String>,
#[serde(rename = "discoveryEndpointUri", default, skip_serializing_if = "Option::is_none")]
pub discovery_endpoint_uri: Option<String>,
#[serde(rename = "resourceLocation", default, skip_serializing_if = "Option::is_none")]
pub resource_location: Option<String>,
#[serde(rename = "serviceLocation", default, skip_serializing_if = "Option::is_none")]
pub service_location: Option<String>,
#[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")]
pub friendly_name: Option<String>,
#[serde(rename = "managementEndpointUri", default, skip_serializing_if = "Option::is_none")]
pub management_endpoint_uri: Option<String>,
#[serde(rename = "monitoringConfiguration", default, skip_serializing_if = "Option::is_none")]
pub monitoring_configuration: Option<String>,
}
/// Base resource envelope with optional `id`, `name`, and `type` fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    /// Resource type; renamed because `type` is a Rust keyword.
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
}
/// A resource identifier, transported as a plain string.
pub type ResourceId = String;
/// Describes a move of one or more resources to a target resource group.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourcesMoveInfo {
    #[serde(rename = "targetResourceGroup", default, skip_serializing_if = "Option::is_none")]
    pub target_resource_group: Option<String>,
    /// Identifiers of the resources to move; omitted when empty.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub resources: Vec<ResourceId>,
}
/// A single file (or directory, when `isdir` is true) selected for restore.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestoreFileSpec {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub path: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub isdir: Option<bool>,
}
/// A server endpoint resource: a flattened `ProxyResource` envelope plus
/// optional [`ServerEndpointProperties`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerEndpoint {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ServerEndpointProperties>,
}
/// List payload wrapping a collection of [`ServerEndpoint`] values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerEndpointArray {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ServerEndpoint>,
}
/// Health of cloud tiering on a server endpoint.
/// Variants serialize under their exact PascalCase names.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ServerEndpointCloudTieringHealthState {
    Healthy,
    Error,
}
/// Cloud-tiering status snapshot for a server endpoint: overall health,
/// last-result/last-success bookkeeping, and nested breakdowns (space
/// savings, cache performance, non-tiering files, policy statuses).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerEndpointCloudTieringStatus {
    #[serde(rename = "lastUpdatedTimestamp", default, skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub health: Option<ServerEndpointCloudTieringHealthState>,
    #[serde(rename = "healthLastUpdatedTimestamp", default, skip_serializing_if = "Option::is_none")]
    pub health_last_updated_timestamp: Option<String>,
    /// Numeric result code of the last tiering run.
    #[serde(rename = "lastCloudTieringResult", default, skip_serializing_if = "Option::is_none")]
    pub last_cloud_tiering_result: Option<i32>,
    #[serde(rename = "lastSuccessTimestamp", default, skip_serializing_if = "Option::is_none")]
    pub last_success_timestamp: Option<String>,
    #[serde(rename = "spaceSavings", default, skip_serializing_if = "Option::is_none")]
    pub space_savings: Option<CloudTieringSpaceSavings>,
    #[serde(rename = "cachePerformance", default, skip_serializing_if = "Option::is_none")]
    pub cache_performance: Option<CloudTieringCachePerformance>,
    #[serde(rename = "filesNotTiering", default, skip_serializing_if = "Option::is_none")]
    pub files_not_tiering: Option<CloudTieringFilesNotTiering>,
    #[serde(rename = "volumeFreeSpacePolicyStatus", default, skip_serializing_if = "Option::is_none")]
    pub volume_free_space_policy_status: Option<CloudTieringVolumeFreeSpacePolicyStatus>,
    #[serde(rename = "datePolicyStatus", default, skip_serializing_if = "Option::is_none")]
    pub date_policy_status: Option<CloudTieringDatePolicyStatus>,
}
/// Request body used when creating a server endpoint.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerEndpointCreateParameters {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ServerEndpointCreateParametersProperties>,
}
/// Properties supplied when creating a server endpoint: local path, cloud
/// tiering configuration, offline data transfer configuration, and the
/// owning server's resource id. All fields optional; camelCase wire names.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerEndpointCreateParametersProperties {
    #[serde(rename = "serverLocalPath", default, skip_serializing_if = "Option::is_none")]
    pub server_local_path: Option<PhysicalPath>,
    #[serde(rename = "cloudTiering", default, skip_serializing_if = "Option::is_none")]
    pub cloud_tiering: Option<FeatureStatus>,
    #[serde(rename = "volumeFreeSpacePercent", default, skip_serializing_if = "Option::is_none")]
    pub volume_free_space_percent: Option<i64>,
    #[serde(rename = "tierFilesOlderThanDays", default, skip_serializing_if = "Option::is_none")]
    pub tier_files_older_than_days: Option<i64>,
    #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")]
    pub friendly_name: Option<String>,
    #[serde(rename = "serverResourceId", default, skip_serializing_if = "Option::is_none")]
    pub server_resource_id: Option<ResourceId>,
    #[serde(rename = "offlineDataTransfer", default, skip_serializing_if = "Option::is_none")]
    pub offline_data_transfer: Option<FeatureStatus>,
    #[serde(rename = "offlineDataTransferShareName", default, skip_serializing_if = "Option::is_none")]
    pub offline_data_transfer_share_name: Option<String>,
}
/// Per-error-code counts of files that are failing to sync on an endpoint,
/// split into persistent and transient occurrences.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerEndpointFilesNotSyncingError {
    #[serde(rename = "errorCode", default, skip_serializing_if = "Option::is_none")]
    pub error_code: Option<i32>,
    #[serde(rename = "persistentCount", default, skip_serializing_if = "Option::is_none")]
    pub persistent_count: Option<i64>,
    #[serde(rename = "transientCount", default, skip_serializing_if = "Option::is_none")]
    pub transient_count: Option<i64>,
}
/// State of an offline data transfer on a server endpoint.
/// Variants serialize under their exact PascalCase names.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ServerEndpointOfflineDataTransferState {
    InProgress,
    Stopping,
    NotRunning,
    Complete,
}
/// Full set of properties reported for a server endpoint. Superset of
/// [`ServerEndpointCreateParametersProperties`] with service-populated
/// state: provisioning/workflow tracking, sync status, offline data
/// transfer details, and cloud tiering / recall status snapshots.
/// All fields optional; camelCase wire names.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerEndpointProperties {
    #[serde(rename = "serverLocalPath", default, skip_serializing_if = "Option::is_none")]
    pub server_local_path: Option<PhysicalPath>,
    #[serde(rename = "cloudTiering", default, skip_serializing_if = "Option::is_none")]
    pub cloud_tiering: Option<FeatureStatus>,
    #[serde(rename = "volumeFreeSpacePercent", default, skip_serializing_if = "Option::is_none")]
    pub volume_free_space_percent: Option<i64>,
    #[serde(rename = "tierFilesOlderThanDays", default, skip_serializing_if = "Option::is_none")]
    pub tier_files_older_than_days: Option<i64>,
    #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")]
    pub friendly_name: Option<String>,
    #[serde(rename = "serverResourceId", default, skip_serializing_if = "Option::is_none")]
    pub server_resource_id: Option<ResourceId>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
    #[serde(rename = "lastWorkflowId", default, skip_serializing_if = "Option::is_none")]
    pub last_workflow_id: Option<String>,
    #[serde(rename = "lastOperationName", default, skip_serializing_if = "Option::is_none")]
    pub last_operation_name: Option<String>,
    #[serde(rename = "syncStatus", default, skip_serializing_if = "Option::is_none")]
    pub sync_status: Option<ServerEndpointSyncStatus>,
    #[serde(rename = "offlineDataTransfer", default, skip_serializing_if = "Option::is_none")]
    pub offline_data_transfer: Option<FeatureStatus>,
    // The two attributes below are split across lines only because their
    // wire names exceed the usual line width; behavior is identical to the
    // single-line form.
    #[serde(
        rename = "offlineDataTransferStorageAccountResourceId",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    pub offline_data_transfer_storage_account_resource_id: Option<String>,
    #[serde(
        rename = "offlineDataTransferStorageAccountTenantId",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    pub offline_data_transfer_storage_account_tenant_id: Option<String>,
    #[serde(rename = "offlineDataTransferShareName", default, skip_serializing_if = "Option::is_none")]
    pub offline_data_transfer_share_name: Option<String>,
    #[serde(rename = "cloudTieringStatus", default, skip_serializing_if = "Option::is_none")]
    pub cloud_tiering_status: Option<ServerEndpointCloudTieringStatus>,
    #[serde(rename = "recallStatus", default, skip_serializing_if = "Option::is_none")]
    pub recall_status: Option<ServerEndpointRecallStatus>,
}
/// A recall error: a numeric error code and how many times it occurred.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerEndpointRecallError {
    #[serde(rename = "errorCode", default, skip_serializing_if = "Option::is_none")]
    pub error_code: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
}
/// Recall status snapshot for a server endpoint: total error count plus the
/// per-code error breakdown.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerEndpointRecallStatus {
    #[serde(rename = "lastUpdatedTimestamp", default, skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
    #[serde(rename = "totalRecallErrorsCount", default, skip_serializing_if = "Option::is_none")]
    pub total_recall_errors_count: Option<i64>,
    #[serde(rename = "recallErrors", default, skip_serializing_if = "Vec::is_empty")]
    pub recall_errors: Vec<ServerEndpointRecallError>,
}
/// Direction(s) of sync activity currently in progress on an endpoint.
/// Variants serialize under their exact PascalCase names.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ServerEndpointSyncActivityState {
    Upload,
    Download,
    UploadAndDownload,
}
/// Health of sync on a server endpoint.
/// Variants serialize under their exact PascalCase names.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ServerEndpointSyncHealthState {
    Healthy,
    Error,
    SyncBlockedForRestore,
    SyncBlockedForChangeDetectionPostRestore,
    NoActivity,
}
/// Sync status snapshot for a server endpoint: per-direction and combined
/// health, current activity, and upload/download session and activity
/// details. All fields optional; camelCase wire names.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerEndpointSyncStatus {
    #[serde(rename = "downloadHealth", default, skip_serializing_if = "Option::is_none")]
    pub download_health: Option<ServerEndpointSyncHealthState>,
    #[serde(rename = "uploadHealth", default, skip_serializing_if = "Option::is_none")]
    pub upload_health: Option<ServerEndpointSyncHealthState>,
    #[serde(rename = "combinedHealth", default, skip_serializing_if = "Option::is_none")]
    pub combined_health: Option<ServerEndpointSyncHealthState>,
    #[serde(rename = "syncActivity", default, skip_serializing_if = "Option::is_none")]
    pub sync_activity: Option<ServerEndpointSyncActivityState>,
    #[serde(rename = "totalPersistentFilesNotSyncingCount", default, skip_serializing_if = "Option::is_none")]
    pub total_persistent_files_not_syncing_count: Option<i64>,
    #[serde(rename = "lastUpdatedTimestamp", default, skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
    #[serde(rename = "uploadStatus", default, skip_serializing_if = "Option::is_none")]
    pub upload_status: Option<SyncSessionStatus>,
    #[serde(rename = "downloadStatus", default, skip_serializing_if = "Option::is_none")]
    pub download_status: Option<SyncSessionStatus>,
    #[serde(rename = "uploadActivity", default, skip_serializing_if = "Option::is_none")]
    pub upload_activity: Option<SyncActivityStatus>,
    #[serde(rename = "downloadActivity", default, skip_serializing_if = "Option::is_none")]
    pub download_activity: Option<SyncActivityStatus>,
    #[serde(rename = "offlineDataTransferStatus", default, skip_serializing_if = "Option::is_none")]
    pub offline_data_transfer_status: Option<ServerEndpointOfflineDataTransferState>,
}
/// Request body used when updating a server endpoint; carries only an
/// optional properties payload (no resource envelope).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerEndpointUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ServerEndpointUpdateProperties>,
}
/// Updatable subset of server endpoint properties (tiering and offline
/// data transfer settings only).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerEndpointUpdateProperties {
    #[serde(rename = "cloudTiering", default, skip_serializing_if = "Option::is_none")]
    pub cloud_tiering: Option<FeatureStatus>,
    #[serde(rename = "volumeFreeSpacePercent", default, skip_serializing_if = "Option::is_none")]
    pub volume_free_space_percent: Option<i64>,
    #[serde(rename = "tierFilesOlderThanDays", default, skip_serializing_if = "Option::is_none")]
    pub tier_files_older_than_days: Option<i64>,
    #[serde(rename = "offlineDataTransfer", default, skip_serializing_if = "Option::is_none")]
    pub offline_data_transfer: Option<FeatureStatus>,
    #[serde(rename = "offlineDataTransferShareName", default, skip_serializing_if = "Option::is_none")]
    pub offline_data_transfer_share_name: Option<String>,
}
/// An API-level error: code, message, target, and optional nested details.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageSyncApiError {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub target: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub details: Option<StorageSyncErrorDetails>,
}
/// Top-level error envelope: the primary error plus an optional inner error.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageSyncError {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<StorageSyncApiError>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub innererror: Option<StorageSyncApiError>,
}
/// Detail record nested inside [`StorageSyncApiError`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageSyncErrorDetails {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub target: Option<String>,
}
/// A storage sync service resource: a flattened `TrackedResource` envelope
/// (which carries the required `location`) plus optional properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageSyncService {
    #[serde(flatten)]
    pub tracked_resource: TrackedResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<StorageSyncServiceProperties>,
}
/// List payload wrapping a collection of [`StorageSyncService`] values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageSyncServiceArray {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<StorageSyncService>,
}
/// Request body used when creating a storage sync service. `location` is
/// required and always serialized; tags and properties are free-form JSON.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageSyncServiceCreateParameters {
    pub location: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<serde_json::Value>,
}
/// Service-populated properties of a storage sync service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageSyncServiceProperties {
    /// Numeric status code of the service.
    #[serde(rename = "storageSyncServiceStatus", default, skip_serializing_if = "Option::is_none")]
    pub storage_sync_service_status: Option<i64>,
    #[serde(rename = "storageSyncServiceUid", default, skip_serializing_if = "Option::is_none")]
    pub storage_sync_service_uid: Option<String>,
}
/// Request body used when updating a storage sync service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageSyncServiceUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<StorageSyncServiceUpdateProperties>,
}
/// Updatable properties of a storage sync service; currently empty
/// (placeholder generated from the API specification).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageSyncServiceUpdateProperties {}
/// State of the owning subscription.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionState {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<subscription_state::State>,
    /// Whether the subscription is transitioning between states.
    /// NOTE(review): not renamed, so it serializes as the all-lowercase
    /// `istransitioning` — presumably the wire format; confirm against the
    /// API specification.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub istransitioning: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SubscriptionStateProperties>,
}
/// Namespace module for [`SubscriptionState`]'s nested enum types.
pub mod subscription_state {
    use super::*;
    /// Subscription lifecycle state; variants serialize under their exact
    /// PascalCase names.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        Registered,
        Unregistered,
        Warned,
        Suspended,
        Deleted,
    }
}
/// Properties of a subscription state; currently empty (placeholder
/// generated from the API specification).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionStateProperties {}
/// Progress counters for an in-flight sync activity (items and bytes
/// applied versus totals). All fields optional; camelCase wire names.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncActivityStatus {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub timestamp: Option<String>,
    #[serde(rename = "perItemErrorCount", default, skip_serializing_if = "Option::is_none")]
    pub per_item_error_count: Option<i64>,
    #[serde(rename = "appliedItemCount", default, skip_serializing_if = "Option::is_none")]
    pub applied_item_count: Option<i64>,
    #[serde(rename = "totalItemCount", default, skip_serializing_if = "Option::is_none")]
    pub total_item_count: Option<i64>,
    #[serde(rename = "appliedBytes", default, skip_serializing_if = "Option::is_none")]
    pub applied_bytes: Option<i64>,
    #[serde(rename = "totalBytes", default, skip_serializing_if = "Option::is_none")]
    pub total_bytes: Option<i64>,
}
/// A sync group resource: a flattened `ProxyResource` envelope plus
/// optional [`SyncGroupProperties`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncGroup {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SyncGroupProperties>,
}
/// List payload wrapping a collection of [`SyncGroup`] values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncGroupArray {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SyncGroup>,
}
/// Request body used when creating a sync group.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncGroupCreateParameters {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SyncGroupCreateParametersProperties>,
}
/// Properties supplied when creating a sync group; currently empty
/// (placeholder generated from the API specification).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncGroupCreateParametersProperties {}
/// Service-populated properties of a sync group.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncGroupProperties {
    #[serde(rename = "uniqueId", default, skip_serializing_if = "Option::is_none")]
    pub unique_id: Option<String>,
    #[serde(rename = "syncGroupStatus", default, skip_serializing_if = "Option::is_none")]
    pub sync_group_status: Option<String>,
}
/// Result of the most recent sync session in one direction: result code,
/// timestamps, error counts, and a per-error-code breakdown of files that
/// are not syncing. All fields optional; camelCase wire names.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncSessionStatus {
    #[serde(rename = "lastSyncResult", default, skip_serializing_if = "Option::is_none")]
    pub last_sync_result: Option<i32>,
    #[serde(rename = "lastSyncTimestamp", default, skip_serializing_if = "Option::is_none")]
    pub last_sync_timestamp: Option<String>,
    #[serde(rename = "lastSyncSuccessTimestamp", default, skip_serializing_if = "Option::is_none")]
    pub last_sync_success_timestamp: Option<String>,
    #[serde(rename = "lastSyncPerItemErrorCount", default, skip_serializing_if = "Option::is_none")]
    pub last_sync_per_item_error_count: Option<i64>,
    #[serde(rename = "persistentFilesNotSyncingCount", default, skip_serializing_if = "Option::is_none")]
    pub persistent_files_not_syncing_count: Option<i64>,
    #[serde(rename = "transientFilesNotSyncingCount", default, skip_serializing_if = "Option::is_none")]
    pub transient_files_not_syncing_count: Option<i64>,
    #[serde(rename = "filesNotSyncingErrors", default, skip_serializing_if = "Vec::is_empty")]
    pub files_not_syncing_errors: Vec<ServerEndpointFilesNotSyncingError>,
}
/// Empty tags payload; currently carries no fields (placeholder generated
/// from the API specification).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TagsObject {}
/// A location-tracked resource: the base [`Resource`] fields flattened in,
/// free-form tags, and a required `location` that is always serialized.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TrackedResource {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    pub location: String,
}
/// Parameters for triggering change detection: a directory path, a
/// detection mode, and an optional explicit list of paths.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TriggerChangeDetectionParameters {
    #[serde(rename = "directoryPath", default, skip_serializing_if = "Option::is_none")]
    pub directory_path: Option<String>,
    #[serde(rename = "changeDetectionMode", default, skip_serializing_if = "Option::is_none")]
    pub change_detection_mode: Option<trigger_change_detection_parameters::ChangeDetectionMode>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub paths: Vec<String>,
}
/// Namespace module for [`TriggerChangeDetectionParameters`]'s nested enum
/// types.
pub mod trigger_change_detection_parameters {
    use super::*;
    /// Change-detection mode; variants serialize under their exact
    /// PascalCase names.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ChangeDetectionMode {
        Default,
        Recursive,
    }
}
/// Request body for triggering a certificate rollover on a server.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TriggerRolloverRequest {
    #[serde(rename = "serverCertificate", default, skip_serializing_if = "Option::is_none")]
    pub server_certificate: Option<String>,
}
/// A workflow resource: a flattened `ProxyResource` envelope plus optional
/// [`WorkflowProperties`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Workflow {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<WorkflowProperties>,
}
/// List payload wrapping a collection of [`Workflow`] values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkflowArray {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Workflow>,
}
/// Service-populated properties of a workflow: last step, status, direction
/// of the operation, step details, and the last operation id.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkflowProperties {
    #[serde(rename = "lastStepName", default, skip_serializing_if = "Option::is_none")]
    pub last_step_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<WorkflowStatus>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub operation: Option<OperationDirection>,
    /// Step information transported as a single string.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub steps: Option<String>,
    #[serde(rename = "lastOperationId", default, skip_serializing_if = "Option::is_none")]
    pub last_operation_id: Option<String>,
}
/// Workflow lifecycle status. Unlike the other enums in this module, the
/// wire values are all-lowercase, hence the per-variant renames.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum WorkflowStatus {
    #[serde(rename = "active")]
    Active,
    #[serde(rename = "expired")]
    Expired,
    #[serde(rename = "succeeded")]
    Succeeded,
    #[serde(rename = "aborted")]
    Aborted,
    #[serde(rename = "failed")]
    Failed,
}
| 49.44908 | 111 | 0.732488 |
21358eb276380a64e5a31b67aade666cab8ccf7a | 57,134 | //! Spans represent periods of time in which a program was executing in a
//! particular context.
//!
//! A span consists of [fields], user-defined key-value pairs of arbitrary data
//! that describe the context the span represents, and a set of fixed attributes
//! that describe all `tracing` spans and events. Attributes describing spans
//! include:
//!
//! - An [`Id`] assigned by the subscriber that uniquely identifies it in relation
//! to other spans.
//! - The span's [parent] in the trace tree.
//! - [Metadata] that describes static characteristics of all spans
//! originating from that callsite, such as its name, source code location,
//! [verbosity level], and the names of its fields.
//!
//! # Creating Spans
//!
//! Spans are created using the [`span!`] macro. This macro is invoked with the
//! following arguments, in order:
//!
//! - The [`target`] and/or [`parent`][parent] attributes, if the user wishes to
//! override their default values.
//! - The span's [verbosity level]
//! - A string literal providing the span's name.
//! - Finally, between zero and 32 arbitrary key/value fields.
//!
//! [`target`]: super::Metadata::target
//!
//! For example:
//! ```rust
//! use tracing::{span, Level};
//!
//! /// Construct a new span at the `INFO` level named "my_span", with a single
//! /// field named `answer`, with the value `42`.
//! let my_span = span!(Level::INFO, "my_span", answer = 42);
//! ```
//!
//! The documentation for the [`span!`] macro provides additional examples of
//! the various options that exist when creating spans.
//!
//! The [`trace_span!`], [`debug_span!`], [`info_span!`], [`warn_span!`], and
//! [`error_span!`] exist as shorthand for constructing spans at various
//! verbosity levels.
//!
//! ## Recording Span Creation
//!
//! The [`Attributes`] type contains data associated with a span, and is
//! provided to the [`Subscriber`] when a new span is created. It contains
//! the span's metadata, the ID of [the span's parent][parent] if one was
//! explicitly set, and any fields whose values were recorded when the span was
//! constructed. The subscriber, which is responsible for recording `tracing`
//! data, can then store or record these values.
//!
//! # The Span Lifecycle
//!
//! ## Entering a Span
//!
//! A thread of execution is said to _enter_ a span when it begins executing,
//! and _exit_ the span when it switches to another context. Spans may be
//! entered through the [`enter`], [`entered`], and [`in_scope`] methods.
//!
//! The [`enter`] method enters a span, returning a [guard] that exits the span
//! when dropped:
//! ```
//! # use tracing::{span, Level};
//! let my_var: u64 = 5;
//! let my_span = span!(Level::TRACE, "my_span", my_var);
//!
//! // `my_span` exists but has not been entered.
//!
//! // Enter `my_span`...
//! let _enter = my_span.enter();
//!
//! // Perform some work inside of the context of `my_span`...
//! // Dropping the `_enter` guard will exit the span.
//!```
//!
//! <div class="example-wrap" style="display:inline-block"><pre class="compile_fail" style="white-space:normal;font:inherit;">
//! <strong>Warning</strong>: In asynchronous code that uses async/await syntax,
//! <code>Span::enter</code> may produce incorrect traces if the returned drop
//! guard is held across an await point. See
//! <a href="struct.Span.html#in-asynchronous-code">the method documentation</a>
//! for details.
//! </pre></div>
//!
//! The [`entered`] method is analogous to [`enter`], but moves the span into
//! the returned guard, rather than borrowing it. This allows creating and
//! entering a span in a single expression:
//!
//! ```
//! # use tracing::{span, Level};
//! // Create a span and enter it, returning a guard:
//! let span = span!(Level::INFO, "my_span").entered();
//!
//! // We are now inside the span! Like `enter()`, the guard returned by
//! // `entered()` will exit the span when it is dropped...
//!
//! // ...but, it can also be exited explicitly, returning the `Span`
//! // struct:
//! let span = span.exit();
//! ```
//!
//! Finally, [`in_scope`] takes a closure or function pointer and executes it
//! inside the span:
//!
//! ```
//! # use tracing::{span, Level};
//! let my_var: u64 = 5;
//! let my_span = span!(Level::TRACE, "my_span", my_var = &my_var);
//!
//! my_span.in_scope(|| {
//! // perform some work in the context of `my_span`...
//! });
//!
//! // Perform some work outside of the context of `my_span`...
//!
//! my_span.in_scope(|| {
//! // Perform some more work in the context of `my_span`.
//! });
//! ```
//!
//! <pre class="ignore" style="white-space:normal;font:inherit;">
//! <strong>Note</strong>: Since entering a span takes <code>&self</code>, and
//! <code>Span</code>s are <code>Clone</code>, <code>Send</code>, and
//! <code>Sync</code>, it is entirely valid for multiple threads to enter the
//! same span concurrently.
//! </pre>
//!
//! ## Span Relationships
//!
//! Spans form a tree structure — unless it is a root span, all spans have a
//! _parent_, and may have one or more _children_. When a new span is created,
//! the current span becomes the new span's parent. The total execution time of
//! a span consists of the time spent in that span and in the entire subtree
//! represented by its children. Thus, a parent span always lasts for at least
//! as long as the longest-executing span in its subtree.
//!
//! ```
//! # use tracing::{Level, span};
//! // this span is considered the "root" of a new trace tree:
//! span!(Level::INFO, "root").in_scope(|| {
//! // since we are now inside "root", this span is considered a child
//! // of "root":
//! span!(Level::DEBUG, "outer_child").in_scope(|| {
//! // this span is a child of "outer_child", which is in turn a
//! // child of "root":
//! span!(Level::TRACE, "inner_child").in_scope(|| {
//! // and so on...
//! });
//! });
//! // another span created here would also be a child of "root".
//! });
//!```
//!
//! In addition, the parent of a span may be explicitly specified in
//! the `span!` macro. For example:
//!
//! ```rust
//! # use tracing::{Level, span};
//! // Create, but do not enter, a span called "foo".
//! let foo = span!(Level::INFO, "foo");
//!
//! // Create and enter a span called "bar".
//! let bar = span!(Level::INFO, "bar");
//! let _enter = bar.enter();
//!
//! // Although we have currently entered "bar", "baz"'s parent span
//! // will be "foo".
//! let baz = span!(parent: &foo, Level::INFO, "baz");
//! ```
//!
//! A child span should typically be considered _part_ of its parent. For
//! example, if a subscriber is recording the length of time spent in various
//! spans, it should generally include the time spent in a span's children as
//! part of that span's duration.
//!
//! In addition to having zero or one parent, a span may also _follow from_ any
//! number of other spans. This indicates a causal relationship between the span
//! and the spans that it follows from, but a follower is *not* typically
//! considered part of the duration of the span it follows. Unlike the parent, a
//! span may record that it follows from another span after it is created, using
//! the [`follows_from`] method.
//!
//! As an example, consider a listener task in a server. As the listener accepts
//! incoming connections, it spawns new tasks that handle those connections. We
//! might want to have a span representing the listener, and instrument each
//! spawned handler task with its own span. We would want our instrumentation to
//! record that the handler tasks were spawned as a result of the listener task.
//! However, we might not consider the handler tasks to be _part_ of the time
//! spent in the listener task, so we would not consider those spans children of
//! the listener span. Instead, we would record that the handler tasks follow
//! from the listener, recording the causal relationship but treating the spans
//! as separate durations.
//!
//! ## Closing Spans
//!
//! Execution may enter and exit a span multiple times before that span is
//! _closed_. Consider, for example, a future which has an associated
//! span and enters that span every time it is polled:
//! ```rust
//! # use std::future::Future;
//! # use std::task::{Context, Poll};
//! # use std::pin::Pin;
//! struct MyFuture {
//! // data
//! span: tracing::Span,
//! }
//!
//! impl Future for MyFuture {
//! type Output = ();
//!
//! fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> {
//! let _enter = self.span.enter();
//! // Do actual future work...
//! # Poll::Ready(())
//! }
//! }
//! ```
//!
//! If this future was spawned on an executor, it might yield one or more times
//! before `poll` returns [`Poll::Ready`]. If the future were to yield, then
//! the executor would move on to poll the next future, which may _also_ enter
//! an associated span or series of spans. Therefore, it is valid for a span to
//! be entered repeatedly before it completes. Only the time when that span or
//! one of its children was the current span is considered to be time spent in
//! that span. A span which is not executing and has not yet been closed is said
//! to be _idle_.
//!
//! Because spans may be entered and exited multiple times before they close,
//! [`Subscriber`]s have separate trait methods which are called to notify them
//! of span exits and when span handles are dropped. When execution exits a
//! span, [`exit`] will always be called with that span's ID to notify the
//! subscriber that the span has been exited. When span handles are dropped, the
//! [`drop_span`] method is called with that span's ID. The subscriber may use
//! this to determine whether or not the span will be entered again.
//!
//! If there is only a single handle with the capacity to exit a span, dropping
//! that handle "closes" the span, since the capacity to enter it no longer
//! exists. For example:
//! ```
//! # use tracing::{Level, span};
//! {
//! span!(Level::TRACE, "my_span").in_scope(|| {
//! // perform some work in the context of `my_span`...
//! }); // --> Subscriber::exit(my_span)
//!
//! // The handle to `my_span` only lives inside of this block; when it is
//! // dropped, the subscriber will be informed via `drop_span`.
//!
//! } // --> Subscriber::drop_span(my_span)
//! ```
//!
//! However, if multiple handles exist, the span can still be re-entered even if
//! one or more is dropped. For determining when _all_ handles to a span have
//! been dropped, `Subscriber`s have a [`clone_span`] method, which is called
//! every time a span handle is cloned. Combined with `drop_span`, this may be
//! used to track the number of handles to a given span — if `drop_span` has
//! been called one more time than the number of calls to `clone_span` for a
//! given ID, then no more handles to the span with that ID exist. The
//! subscriber may then treat it as closed.
//!
//! # When to use spans
//!
//! As a rule of thumb, spans should be used to represent discrete units of work
//! (e.g., a given request's lifetime in a server) or periods of time spent in a
//! given context (e.g., time spent interacting with an instance of an external
//! system, such as a database).
//!
//! Which scopes in a program correspond to new spans depend somewhat on user
//! intent. For example, consider the case of a loop in a program. Should we
//! construct one span and perform the entire loop inside of that span, like:
//!
//! ```rust
//! # use tracing::{Level, span};
//! # let n = 1;
//! let span = span!(Level::TRACE, "my_loop");
//! let _enter = span.enter();
//! for i in 0..n {
//! # let _ = i;
//! // ...
//! }
//! ```
//! Or, should we create a new span for each iteration of the loop, as in:
//! ```rust
//! # use tracing::{Level, span};
//! # let n = 1u64;
//! for i in 0..n {
//! let span = span!(Level::TRACE, "my_loop", iteration = i);
//! let _enter = span.enter();
//! // ...
//! }
//! ```
//!
//! Depending on the circumstances, we might want to do either, or both. For
//! example, if we want to know how long was spent in the loop overall, we would
//! create a single span around the entire loop; whereas if we wanted to know how
//! much time was spent in each individual iteration, we would enter a new span
//! on every iteration.
//!
//! [fields]: super::field
//! [Metadata]: super::Metadata
//! [verbosity level]: super::Level
//! [`Poll::Ready`]: std::task::Poll::Ready
//! [`span!`]: super::span!
//! [`trace_span!`]: super::trace_span!
//! [`debug_span!`]: super::debug_span!
//! [`info_span!`]: super::info_span!
//! [`warn_span!`]: super::warn_span!
//! [`error_span!`]: super::error_span!
//! [`clone_span`]: super::subscriber::Subscriber::clone_span()
//! [`drop_span`]: super::subscriber::Subscriber::drop_span()
//! [`exit`]: super::subscriber::Subscriber::exit
//! [`Subscriber`]: super::subscriber::Subscriber
//! [`enter`]: Span::enter()
//! [`entered`]: Span::entered()
//! [`in_scope`]: Span::in_scope()
//! [`follows_from`]: Span::follows_from()
//! [guard]: Entered
//! [parent]: #span-relationships
pub use tracing_core::span::{Attributes, Id, Record};
use crate::stdlib::{
cmp, fmt,
hash::{Hash, Hasher},
marker::PhantomData,
mem,
ops::Deref,
};
use crate::{
dispatcher::{self, Dispatch},
field, Metadata,
};
/// Trait implemented by types which have a span `Id`.
///
/// This trait is sealed (via the private `crate::sealed::Sealed` supertrait),
/// so it can only be implemented by types within this crate.
pub trait AsId: crate::sealed::Sealed {
    /// Returns the `Id` of the span that `self` corresponds to, or `None` if
    /// this corresponds to a disabled span.
    fn as_id(&self) -> Option<&Id>;
}
/// A handle representing a span, with the capability to enter the span if it
/// exists.
///
/// If the span was rejected by the current `Subscriber`'s filter, entering the
/// span will silently do nothing. Thus, the handle can be used in the same
/// manner regardless of whether or not the trace is currently being collected.
#[derive(Clone)]
pub struct Span {
    /// A handle used to enter the span when it is not executing.
    ///
    /// If this is `None`, then the span has either closed or was never enabled.
    inner: Option<Inner>,
    /// Metadata describing the span.
    ///
    /// This might be `Some` even if `inner` is `None`, in the case that the
    /// span is disabled but the metadata is needed for `log` support.
    ///
    /// When *both* fields are `None`, the span was constructed by
    /// [`Span::none`]; see [`Span::is_none`].
    meta: Option<&'static Metadata<'static>>,
}
/// A handle representing the capacity to enter a span which is known to exist.
///
/// Unlike `Span`, this type is only constructed for spans which _have_ been
/// enabled by the current filter. This type is primarily used for implementing
/// span handles; users should typically not need to interact with it directly.
#[derive(Debug)]
pub(crate) struct Inner {
    /// The span's ID, as provided by `subscriber`.
    id: Id,
    /// The subscriber that will receive events relating to this span.
    ///
    /// This should be the same subscriber that provided this span with its
    /// `id`. Pairing the ID with its originating dispatcher ensures that
    /// enter/exit/record notifications go to the subscriber that actually
    /// allocated the ID.
    subscriber: Dispatch,
}
/// A guard representing a span which has been entered and is currently
/// executing.
///
/// When the guard is dropped, the span will be exited.
///
/// This is returned by the [`Span::enter`] function.
///
/// Because this guard borrows the `Span` (`&'a Span`), the span handle is
/// guaranteed to outlive the guard.
///
/// [`Span::enter`]: super::Span::enter
#[derive(Debug)]
#[must_use = "once a span has been entered, it should be exited"]
pub struct Entered<'a> {
    span: &'a Span,
}
/// An owned version of [`Entered`], a guard representing a span which has been
/// entered and is currently executing.
///
/// When the guard is dropped, the span will be exited.
///
/// This is returned by the [`Span::entered`] function.
///
/// [`Span::entered`]: super::Span::entered()
#[derive(Debug)]
#[must_use = "once a span has been entered, it should be exited"]
pub struct EnteredSpan {
    span: Span,
    /// The doctest below is a compile-time assertion that this type is *not*
    /// `Send`: dropping the guard on a thread other than the one that entered
    /// the span would exit the span on the wrong thread.
    /// ```compile_fail
    /// use tracing::span::*;
    /// trait AssertSend: Send {}
    ///
    /// impl AssertSend for EnteredSpan {}
    /// ```
    _not_send: PhantomNotSend,
}
/// `log` target for all span lifecycle (creation/enter/exit/close) records.
///
/// Only compiled in when the `log` compatibility feature is enabled.
#[cfg(feature = "log")]
const LIFECYCLE_LOG_TARGET: &str = "tracing::span";
/// `log` target for span activity (enter/exit) records.
///
/// Only compiled in when the `log` compatibility feature is enabled.
#[cfg(feature = "log")]
const ACTIVITY_LOG_TARGET: &str = "tracing::span::active";
// ===== impl Span =====
impl Span {
/// Constructs a new `Span` with the given [metadata] and set of
/// [field values].
///
/// The new span will be constructed by the currently-active [`Subscriber`],
/// with the current span as its parent (if one exists).
///
/// After the span is constructed, [field values] and/or [`follows_from`]
/// annotations may be added to it.
///
/// [metadata]: super::Metadata
/// [`Subscriber`]: super::subscriber::Subscriber
/// [field values]: super::field::ValueSet
/// [`follows_from`]: super::Span::follows_from
pub fn new(meta: &'static Metadata<'static>, values: &field::ValueSet<'_>) -> Span {
dispatcher::get_default(|dispatch| Self::new_with(meta, values, dispatch))
}
#[inline]
#[doc(hidden)]
pub fn new_with(
meta: &'static Metadata<'static>,
values: &field::ValueSet<'_>,
dispatch: &Dispatch,
) -> Span {
let new_span = Attributes::new(meta, values);
Self::make_with(meta, new_span, dispatch)
}
/// Constructs a new `Span` as the root of its own trace tree, with the
/// given [metadata] and set of [field values].
///
/// After the span is constructed, [field values] and/or [`follows_from`]
/// annotations may be added to it.
///
/// [metadata]: super::Metadata
/// [field values]: super::field::ValueSet
/// [`follows_from`]: super::Span::follows_from
pub fn new_root(meta: &'static Metadata<'static>, values: &field::ValueSet<'_>) -> Span {
dispatcher::get_default(|dispatch| Self::new_root_with(meta, values, dispatch))
}
#[inline]
#[doc(hidden)]
pub fn new_root_with(
meta: &'static Metadata<'static>,
values: &field::ValueSet<'_>,
dispatch: &Dispatch,
) -> Span {
let new_span = Attributes::new_root(meta, values);
Self::make_with(meta, new_span, dispatch)
}
/// Constructs a new `Span` as child of the given parent span, with the
/// given [metadata] and set of [field values].
///
/// After the span is constructed, [field values] and/or [`follows_from`]
/// annotations may be added to it.
///
/// [metadata]: super::Metadata
/// [field values]: super::field::ValueSet
/// [`follows_from`]: super::Span::follows_from
    pub fn child_of(
        parent: impl Into<Option<Id>>,
        meta: &'static Metadata<'static>,
        values: &field::ValueSet<'_>,
    ) -> Span {
        // Convert the parent ID up front so the closure below captures a
        // plain `Option<Id>` rather than the `impl Into` argument.
        let mut parent = parent.into();
        dispatcher::get_default(move |dispatch| {
            // The closure passed to `get_default` may be invoked through a
            // mutable reference, so it cannot simply move `parent` out by
            // value; `Option::take` moves the `Id` out of the captured
            // `Option` instead (leaving `None` behind).
            Self::child_of_with(Option::take(&mut parent), meta, values, dispatch)
        })
    }
#[inline]
#[doc(hidden)]
pub fn child_of_with(
parent: impl Into<Option<Id>>,
meta: &'static Metadata<'static>,
values: &field::ValueSet<'_>,
dispatch: &Dispatch,
) -> Span {
let new_span = match parent.into() {
Some(parent) => Attributes::child_of(parent, meta, values),
None => Attributes::new_root(meta, values),
};
Self::make_with(meta, new_span, dispatch)
}
/// Constructs a new disabled span with the given `Metadata`.
///
/// This should be used when a span is constructed from a known callsite,
/// but the subscriber indicates that it is disabled.
///
/// Entering, exiting, and recording values on this span will not notify the
/// `Subscriber` but _may_ record log messages if the `log` feature flag is
/// enabled.
#[inline(always)]
pub fn new_disabled(meta: &'static Metadata<'static>) -> Span {
Self {
inner: None,
meta: Some(meta),
}
}
/// Constructs a new span that is *completely disabled*.
///
/// This can be used rather than `Option<Span>` to represent cases where a
/// span is not present.
///
/// Entering, exiting, and recording values on this span will do nothing.
#[inline(always)]
pub const fn none() -> Span {
Self {
inner: None,
meta: None,
}
}
/// Returns a handle to the span [considered by the `Subscriber`] to be the
/// current span.
///
/// If the subscriber indicates that it does not track the current span, or
/// that the thread from which this function is called is not currently
/// inside a span, the returned span will be disabled.
///
/// [considered by the `Subscriber`]:
/// super::subscriber::Subscriber::current_span
pub fn current() -> Span {
dispatcher::get_default(|dispatch| {
if let Some((id, meta)) = dispatch.current_span().into_inner() {
let id = dispatch.clone_span(&id);
Self {
inner: Some(Inner::new(id, dispatch)),
meta: Some(meta),
}
} else {
Self::none()
}
})
}
    /// Shared constructor: registers `new_span` with the subscriber and wraps
    /// the ID it returns in a `Span` handle.
    fn make_with(
        meta: &'static Metadata<'static>,
        new_span: Attributes<'_>,
        dispatch: &Dispatch,
    ) -> Span {
        let attrs = &new_span;
        // Asking the subscriber for an ID is also how it is notified that the
        // span was created.
        let id = dispatch.new_span(attrs);
        let inner = Some(Inner::new(id, dispatch));
        let span = Self {
            inner,
            meta: Some(meta),
        };
        // With the `log` feature, additionally emit a `log` record ("++") for
        // span creation; if there are no fields, it targets the lifecycle
        // target rather than the span's own target.
        if_log_enabled! { *meta.level(), {
            let target = if attrs.is_empty() {
                LIFECYCLE_LOG_TARGET
            } else {
                meta.target()
            };
            let values = attrs.values();
            span.log(
                target,
                level_to_log!(*meta.level()),
                format_args!("++ {};{}", meta.name(), crate::log::LogValueSet { values, is_first: false }),
            );
        }}
        span
    }
/// Enters this span, returning a guard that will exit the span when dropped.
///
/// If this span is enabled by the current subscriber, then this function will
/// call [`Subscriber::enter`] with the span's [`Id`], and dropping the guard
/// will call [`Subscriber::exit`]. If the span is disabled, this does
/// nothing.
///
/// # In Asynchronous Code
///
/// **Warning**: in asynchronous code that uses [async/await syntax][syntax],
/// `Span::enter` should be used very carefully or avoided entirely. Holding
/// the drop guard returned by `Span::enter` across `.await` points will
/// result in incorrect traces. For example,
///
/// ```
/// # use tracing::info_span;
/// # async fn some_other_async_function() {}
/// async fn my_async_function() {
/// let span = info_span!("my_async_function");
///
/// // WARNING: This span will remain entered until this
/// // guard is dropped...
/// let _enter = span.enter();
/// // ...but the `await` keyword may yield, causing the
/// // runtime to switch to another task, while remaining in
/// // this span!
/// some_other_async_function().await
///
/// // ...
/// }
/// ```
///
/// The drop guard returned by `Span::enter` exits the span when it is
/// dropped. When an async function or async block yields at an `.await`
/// point, the current scope is _exited_, but values in that scope are
/// **not** dropped (because the async block will eventually resume
/// execution from that await point). This means that _another_ task will
/// begin executing while _remaining_ in the entered span. This results in
/// an incorrect trace.
///
/// Instead of using `Span::enter` in asynchronous code, prefer the
/// following:
///
/// * To enter a span for a synchronous section of code within an async
/// block or function, prefer [`Span::in_scope`]. Since `in_scope` takes a
/// synchronous closure and exits the span when the closure returns, the
/// span will always be exited before the next await point. For example:
/// ```
/// # use tracing::info_span;
/// # async fn some_other_async_function(_: ()) {}
/// async fn my_async_function() {
/// let span = info_span!("my_async_function");
///
/// let some_value = span.in_scope(|| {
/// // run some synchronous code inside the span...
/// });
///
/// // This is okay! The span has already been exited before we reach
/// // the await point.
/// some_other_async_function(some_value).await;
///
/// // ...
/// }
/// ```
/// * For instrumenting asynchronous code, `tracing` provides the
/// [`Future::instrument` combinator][instrument] for
/// attaching a span to a future (async function or block). This will
/// enter the span _every_ time the future is polled, and exit it whenever
/// the future yields.
///
/// `Instrument` can be used with an async block inside an async function:
/// ```ignore
/// # use tracing::info_span;
/// use tracing::Instrument;
///
/// # async fn some_other_async_function() {}
/// async fn my_async_function() {
/// let span = info_span!("my_async_function");
/// async move {
/// // This is correct! If we yield here, the span will be exited,
/// // and re-entered when we resume.
/// some_other_async_function().await;
///
/// //more asynchronous code inside the span...
///
/// }
/// // instrument the async block with the span...
/// .instrument(span)
/// // ...and await it.
/// .await
/// }
/// ```
///
/// It can also be used to instrument calls to async functions at the
/// callsite:
/// ```ignore
/// # use tracing::debug_span;
/// use tracing::Instrument;
///
/// # async fn some_other_async_function() {}
/// async fn my_async_function() {
/// let some_value = some_other_async_function()
/// .instrument(debug_span!("some_other_async_function"))
/// .await;
///
/// // ...
/// }
/// ```
///
/// * The [`#[instrument]` attribute macro][attr] can automatically generate
/// correct code when used on an async function:
///
/// ```ignore
/// # async fn some_other_async_function() {}
/// #[tracing::instrument(level = "info")]
/// async fn my_async_function() {
///
/// // This is correct! If we yield here, the span will be exited,
/// // and re-entered when we resume.
/// some_other_async_function().await;
///
/// // ...
///
/// }
/// ```
///
/// [syntax]: https://rust-lang.github.io/async-book/01_getting_started/04_async_await_primer.html
/// [`Span::in_scope`]: Span::in_scope()
/// [instrument]: crate::Instrument
/// [attr]: macro@crate::instrument
///
/// # Examples
///
/// ```
/// # use tracing::{span, Level};
/// let span = span!(Level::INFO, "my_span");
/// let guard = span.enter();
///
/// // code here is within the span
///
/// drop(guard);
///
/// // code here is no longer within the span
///
/// ```
///
/// Guards need not be explicitly dropped:
///
/// ```
/// # use tracing::trace_span;
/// fn my_function() -> String {
/// // enter a span for the duration of this function.
/// let span = trace_span!("my_function");
/// let _enter = span.enter();
///
/// // anything happening in functions we call is still inside the span...
/// my_other_function();
///
/// // returning from the function drops the guard, exiting the span.
/// return "Hello world".to_owned();
/// }
///
/// fn my_other_function() {
/// // ...
/// }
/// ```
///
/// Sub-scopes may be created to limit the duration for which the span is
/// entered:
///
/// ```
/// # use tracing::{info, info_span};
/// let span = info_span!("my_great_span");
///
/// {
/// let _enter = span.enter();
///
/// // this event occurs inside the span.
/// info!("i'm in the span!");
///
/// // exiting the scope drops the guard, exiting the span.
/// }
///
/// // this event is not inside the span.
/// info!("i'm outside the span!")
/// ```
///
/// [`Subscriber::enter`]: super::subscriber::Subscriber::enter()
/// [`Subscriber::exit`]: super::subscriber::Subscriber::exit()
/// [`Id`]: super::Id
#[inline(always)]
pub fn enter(&self) -> Entered<'_> {
self.do_enter();
Entered { span: self }
}
/// Enters this span, consuming it and returning a [guard][`EnteredSpan`]
/// that will exit the span when dropped.
///
/// <pre class="compile_fail" style="white-space:normal;font:inherit;">
/// <strong>Warning</strong>: In asynchronous code that uses async/await syntax,
/// <code>Span::entered</code> may produce incorrect traces if the returned drop
/// guard is held across an await point. See <a href="#in-asynchronous-code">the
/// <code>Span::enter</code> documentation</a> for details.
/// </pre>
///
///
/// If this span is enabled by the current subscriber, then this function will
/// call [`Subscriber::enter`] with the span's [`Id`], and dropping the guard
/// will call [`Subscriber::exit`]. If the span is disabled, this does
/// nothing.
///
/// This is similar to the [`Span::enter`] method, except that it moves the
/// span by value into the returned guard, rather than borrowing it.
/// Therefore, this method can be used to create and enter a span in a
/// single expression, without requiring a `let`-binding. For example:
///
/// ```
/// # use tracing::info_span;
/// let _span = info_span!("something_interesting").entered();
/// ```
/// rather than:
/// ```
/// # use tracing::info_span;
/// let span = info_span!("something_interesting");
/// let _e = span.enter();
/// ```
///
/// Furthermore, `entered` may be used when the span must be stored in some
/// other struct or be passed to a function while remaining entered.
///
/// <pre class="ignore" style="white-space:normal;font:inherit;">
/// <strong>Note</strong>: The returned <a href="../struct.EnteredSpan.html">
/// <code>EnteredSpan</a></code> guard does not implement <code>Send</code>.
/// Dropping the guard will exit <em>this</em> span, and if the guard is sent
/// to another thread and dropped there, that thread may never have entered
/// this span. Thus, <code>EnteredSpan</code>s should not be sent between threads.
/// </pre>
///
/// [syntax]: https://rust-lang.github.io/async-book/01_getting_started/04_async_await_primer.html
///
/// # Examples
///
/// The returned guard can be [explicitly exited][EnteredSpan::exit],
/// returning the un-entered span:
///
/// ```
/// # use tracing::{Level, span};
/// let span = span!(Level::INFO, "doing_something").entered();
///
/// // code here is within the span
///
/// // explicitly exit the span, returning it
/// let span = span.exit();
///
/// // code here is no longer within the span
///
/// // enter the span again
/// let span = span.entered();
///
/// // now we are inside the span once again
/// ```
///
/// Guards need not be explicitly dropped:
///
/// ```
/// # use tracing::trace_span;
/// fn my_function() -> String {
/// // enter a span for the duration of this function.
/// let span = trace_span!("my_function").entered();
///
/// // anything happening in functions we call is still inside the span...
/// my_other_function();
///
/// // returning from the function drops the guard, exiting the span.
/// return "Hello world".to_owned();
/// }
///
/// fn my_other_function() {
/// // ...
/// }
/// ```
///
/// Since the [`EnteredSpan`] guard can dereference to the [`Span`] itself,
/// the span may still be accessed while entered. For example:
///
/// ```rust
/// # use tracing::info_span;
/// use tracing::field;
///
/// // create the span with an empty field, and enter it.
/// let span = info_span!("my_span", some_field = field::Empty).entered();
///
/// // we can still record a value for the field while the span is entered.
/// span.record("some_field", &"hello world!");
/// ```
///
/// [`Subscriber::enter`]: super::subscriber::Subscriber::enter()
/// [`Subscriber::exit`]: super::subscriber::Subscriber::exit()
/// [`Id`]: super::Id
#[inline(always)]
pub fn entered(self) -> EnteredSpan {
self.do_enter();
EnteredSpan {
span: self,
_not_send: PhantomNotSend,
}
}
/// Returns this span, if it was [enabled] by the current [`Subscriber`], or
/// the [current span] (whose lexical distance may be further than expected),
/// if this span [is disabled].
///
/// This method can be useful when propagating spans to spawned threads or
/// [async tasks]. Consider the following:
///
/// ```
/// let _parent_span = tracing::info_span!("parent").entered();
///
/// // ...
///
/// let child_span = tracing::debug_span!("child");
///
/// std::thread::spawn(move || {
/// let _entered = child_span.entered();
///
/// tracing::info!("spawned a thread!");
///
/// // ...
/// });
/// ```
///
/// If the current [`Subscriber`] enables the [`DEBUG`] level, then both
    /// the "parent" and "child" spans will be enabled. Thus, when the "spawned
/// a thread!" event occurs, it will be inside of the "child" span. Because
/// "parent" is the parent of "child", the event will _also_ be inside of
/// "parent".
///
/// However, if the [`Subscriber`] only enables the [`INFO`] level, the "child"
/// span will be disabled. When the thread is spawned, the
/// `child_span.entered()` call will do nothing, since "child" is not
/// enabled. In this case, the "spawned a thread!" event occurs outside of
/// *any* span, since the "child" span was responsible for propagating its
/// parent to the spawned thread.
///
/// If this is not the desired behavior, `Span::or_current` can be used to
/// ensure that the "parent" span is propagated in both cases, either as a
/// parent of "child" _or_ directly. For example:
///
/// ```
/// let _parent_span = tracing::info_span!("parent").entered();
///
/// // ...
///
/// // If DEBUG is enabled, then "child" will be enabled, and `or_current`
/// // returns "child". Otherwise, if DEBUG is not enabled, "child" will be
/// // disabled, and `or_current` returns "parent".
/// let child_span = tracing::debug_span!("child").or_current();
///
/// std::thread::spawn(move || {
/// let _entered = child_span.entered();
///
/// tracing::info!("spawned a thread!");
///
/// // ...
/// });
/// ```
///
/// When spawning [asynchronous tasks][async tasks], `Span::or_current` can
/// be used similarly, in combination with [`instrument`]:
///
/// ```
/// use tracing::Instrument;
/// # // lol
/// # mod tokio {
/// # pub(super) fn spawn(_: impl std::future::Future) {}
/// # }
///
/// let _parent_span = tracing::info_span!("parent").entered();
///
/// // ...
///
/// let child_span = tracing::debug_span!("child");
///
/// tokio::spawn(
/// async {
/// tracing::info!("spawned a task!");
///
/// // ...
///
/// }.instrument(child_span.or_current())
/// );
/// ```
///
/// In general, `or_current` should be preferred over nesting an
/// [`instrument`] call inside of an [`in_current_span`] call, as using
/// `or_current` will be more efficient.
///
/// ```
/// use tracing::Instrument;
/// # // lol
/// # mod tokio {
/// # pub(super) fn spawn(_: impl std::future::Future) {}
/// # }
/// async fn my_async_fn() {
/// // ...
/// }
///
/// let _parent_span = tracing::info_span!("parent").entered();
///
/// // Do this:
/// tokio::spawn(
/// my_async_fn().instrument(tracing::debug_span!("child").or_current())
/// );
///
/// // ...rather than this:
/// tokio::spawn(
/// my_async_fn()
/// .instrument(tracing::debug_span!("child"))
/// .in_current_span()
/// );
/// ```
///
/// [enabled]: crate::Subscriber::enabled
/// [`Subscriber`]: crate::Subscriber
/// [current span]: Span::current
/// [is disabled]: Span::is_disabled
/// [`INFO`]: crate::Level::INFO
/// [`DEBUG`]: crate::Level::DEBUG
/// [async tasks]: std::task
/// [`instrument`]: crate::instrument::Instrument::instrument
/// [`in_current_span`]: crate::instrument::Instrument::in_current_span
pub fn or_current(self) -> Self {
if self.is_disabled() {
return Self::current();
}
self
}
    /// Notifies the subscriber (if this span is enabled) that the span has
    /// been entered. Shared by `enter`, `entered`, and `in_scope`.
    #[inline(always)]
    fn do_enter(&self) {
        if let Some(inner) = self.inner.as_ref() {
            inner.subscriber.enter(&inner.id);
        }
        // With the `log` feature, also emit a TRACE-level `log` record ("->")
        // for the enter event under the activity target.
        if_log_enabled! { crate::Level::TRACE, {
            if let Some(_meta) = self.meta {
                self.log(ACTIVITY_LOG_TARGET, log::Level::Trace, format_args!("-> {};", _meta.name()));
            }
        }}
    }
    // Called from [`Entered`] and [`EnteredSpan`] drops.
    //
    // Running this behaviour on drop rather than with an explicit function
    // call means that spans may still be exited when unwinding.
    #[inline(always)]
    fn do_exit(&self) {
        if let Some(inner) = self.inner.as_ref() {
            inner.subscriber.exit(&inner.id);
        }
        // Mirror of `do_enter`: with the `log` feature, also emit a
        // TRACE-level `log` record ("<-") for the exit event.
        if_log_enabled! { crate::Level::TRACE, {
            if let Some(_meta) = self.meta {
                self.log(ACTIVITY_LOG_TARGET, log::Level::Trace, format_args!("<- {};", _meta.name()));
            }
        }}
    }
/// Executes the given function in the context of this span.
///
/// If this span is enabled, then this function enters the span, invokes `f`
/// and then exits the span. If the span is disabled, `f` will still be
/// invoked, but in the context of the currently-executing span (if there is
/// one).
///
/// Returns the result of evaluating `f`.
///
/// # Examples
///
/// ```
/// # use tracing::{trace, span, Level};
/// let my_span = span!(Level::TRACE, "my_span");
///
/// my_span.in_scope(|| {
/// // this event occurs within the span.
/// trace!("i'm in the span!");
/// });
///
/// // this event occurs outside the span.
/// trace!("i'm not in the span!");
/// ```
///
/// Calling a function and returning the result:
/// ```
/// # use tracing::{info_span, Level};
/// fn hello_world() -> String {
/// "Hello world!".to_owned()
/// }
///
/// let span = info_span!("hello_world");
/// // the span will be entered for the duration of the call to
/// // `hello_world`.
/// let a_string = span.in_scope(hello_world);
    /// ```
pub fn in_scope<F: FnOnce() -> T, T>(&self, f: F) -> T {
let _enter = self.enter();
f()
}
/// Returns a [`Field`][super::field::Field] for the field with the
    /// given `name`, if one exists.
pub fn field<Q: ?Sized>(&self, field: &Q) -> Option<field::Field>
where
Q: field::AsField,
{
self.metadata().and_then(|meta| field.as_field(meta))
}
/// Returns true if this `Span` has a field for the given
/// [`Field`][super::field::Field] or field name.
#[inline]
pub fn has_field<Q: ?Sized>(&self, field: &Q) -> bool
where
Q: field::AsField,
{
self.field(field).is_some()
}
/// Records that the field described by `field` has the value `value`.
///
/// This may be used with [`field::Empty`] to declare fields whose values
/// are not known when the span is created, and record them later:
/// ```
/// use tracing::{trace_span, field};
///
/// // Create a span with two fields: `greeting`, with the value "hello world", and
/// // `parting`, without a value.
/// let span = trace_span!("my_span", greeting = "hello world", parting = field::Empty);
///
/// // ...
///
/// // Now, record a value for parting as well.
/// // (note that the field name is passed as a string slice)
/// span.record("parting", &"goodbye world!");
/// ```
/// However, it may also be used to record a _new_ value for a field whose
/// value was already recorded:
/// ```
/// use tracing::info_span;
/// # fn do_something() -> Result<(), ()> { Err(()) }
///
/// // Initially, let's assume that our attempt to do something is going okay...
/// let span = info_span!("doing_something", is_okay = true);
/// let _e = span.enter();
///
/// match do_something() {
/// Ok(something) => {
/// // ...
/// }
/// Err(_) => {
/// // Things are no longer okay!
/// span.record("is_okay", &false);
/// }
/// }
/// ```
///
/// <pre class="ignore" style="white-space:normal;font:inherit;">
/// <strong>Note</strong>: The fields associated with a span are part
/// of its <a href="../struct.Metadata.html"><code>Metadata</code></a>.
/// The <a href="../struct.Metadata.html"><code>Metadata</code></a>
/// describing a particular span is constructed statically when the span
/// is created and cannot be extended later to add new fields. Therefore,
/// you cannot record a value for a field that was not specified when the
/// span was created:
/// </pre>
///
/// ```
/// use tracing::{trace_span, field};
///
/// // Create a span with two fields: `greeting`, with the value "hello world", and
/// // `parting`, without a value.
/// let span = trace_span!("my_span", greeting = "hello world", parting = field::Empty);
///
/// // ...
///
/// // Now, you try to record a value for a new field, `new_field`, which was not
/// // declared as `Empty` or populated when you created `span`.
/// // You won't get any error, but the assignment will have no effect!
/// span.record("new_field", &"interesting_value_you_really_need");
///
/// // Instead, all fields that may be recorded after span creation should be declared up front,
/// // using field::Empty when a value is not known, as we did for `parting`.
/// // This `record` call will indeed replace field::Empty with "you will be remembered".
/// span.record("parting", &"you will be remembered");
/// ```
///
/// [`field::Empty`]: super::field::Empty
/// [`Metadata`]: super::Metadata
pub fn record<Q: ?Sized, V>(&self, field: &Q, value: &V) -> &Self
where
Q: field::AsField,
V: field::Value,
{
if let Some(meta) = self.meta {
if let Some(field) = field.as_field(meta) {
self.record_all(
&meta
.fields()
.value_set(&[(&field, Some(value as &dyn field::Value))]),
);
}
}
self
}
    /// Records all the fields in the provided `ValueSet`.
    pub fn record_all(&self, values: &field::ValueSet<'_>) -> &Self {
        let record = Record::new(values);
        // Forward the values to the subscriber, if this span is enabled.
        if let Some(ref inner) = self.inner {
            inner.record(&record);
        }
        // With the `log` feature, also emit a `log` record for the new
        // values; an empty record goes to the lifecycle target rather than
        // the span's own target.
        if let Some(_meta) = self.meta {
            if_log_enabled! { *_meta.level(), {
                let target = if record.is_empty() {
                    LIFECYCLE_LOG_TARGET
                } else {
                    _meta.target()
                };
                self.log(
                    target,
                    level_to_log!(*_meta.level()),
                    format_args!("{};{}", _meta.name(), crate::log::LogValueSet { values, is_first: false }),
                );
            }}
        }
        self
    }
/// Returns `true` if this span was disabled by the subscriber and does not
/// exist.
///
/// See also [`is_none`].
///
/// [`is_none`]: Span::is_none()
    #[inline]
    pub fn is_disabled(&self) -> bool {
        // `inner` is only `Some` for spans that were enabled by the
        // subscriber's filter.
        self.inner.is_none()
    }
/// Returns `true` if this span was constructed by [`Span::none`] and is
/// empty.
///
/// If `is_none` returns `true` for a given span, then [`is_disabled`] will
/// also return `true`. However, when a span is disabled by the subscriber
/// rather than constructed by `Span::none`, this method will return
/// `false`, while `is_disabled` will return `true`.
///
/// [`Span::none`]: Span::none()
/// [`is_disabled`]: Span::is_disabled()
#[inline]
pub fn is_none(&self) -> bool {
self.is_disabled() && self.meta.is_none()
}
/// Indicates that the span with the given ID has an indirect causal
/// relationship with this span.
///
/// This relationship differs somewhat from the parent-child relationship: a
/// span may have any number of prior spans, rather than a single one; and
/// spans are not considered to be executing _inside_ of the spans they
/// follow from. This means that a span may close even if subsequent spans
/// that follow from it are still open, and time spent inside of a
/// subsequent span should not be included in the time its precedents were
/// executing. This is used to model causal relationships such as when a
/// single future spawns several related background tasks, et cetera.
///
/// If this span is disabled, or the resulting follows-from relationship
/// would be invalid, this function will do nothing.
///
/// # Examples
///
/// Setting a `follows_from` relationship with a `Span`:
/// ```
/// # use tracing::{span, Id, Level, Span};
/// let span1 = span!(Level::INFO, "span_1");
/// let span2 = span!(Level::DEBUG, "span_2");
/// span2.follows_from(span1);
/// ```
///
/// Setting a `follows_from` relationship with the current span:
/// ```
/// # use tracing::{span, Id, Level, Span};
/// let span = span!(Level::INFO, "hello!");
/// span.follows_from(Span::current());
/// ```
///
/// Setting a `follows_from` relationship with a `Span` reference:
/// ```
/// # use tracing::{span, Id, Level, Span};
/// let span = span!(Level::INFO, "hello!");
/// let curr = Span::current();
/// span.follows_from(&curr);
/// ```
///
/// Setting a `follows_from` relationship with an `Id`:
/// ```
/// # use tracing::{span, Id, Level, Span};
/// let span = span!(Level::INFO, "hello!");
/// let id = span.id();
/// span.follows_from(id);
/// ```
pub fn follows_from(&self, from: impl Into<Option<Id>>) -> &Self {
if let Some(ref inner) = self.inner {
if let Some(from) = from.into() {
inner.follows_from(&from);
}
}
self
}
    /// Returns this span's `Id`, if it is enabled.
    pub fn id(&self) -> Option<Id> {
        self.inner.as_ref().map(Inner::id)
    }
    /// Returns this span's `Metadata`, if it is enabled.
    ///
    /// Note: `self.meta` may be `Some` even when the span is disabled by the
    /// subscriber (compare `is_none`, which additionally checks `meta`).
    pub fn metadata(&self) -> Option<&'static Metadata<'static>> {
        self.meta
    }
#[cfg(feature = "log")]
#[inline]
fn log(&self, target: &str, level: log::Level, message: fmt::Arguments<'_>) {
if let Some(meta) = self.meta {
if level_to_log!(*meta.level()) <= log::max_level() {
let logger = log::logger();
let log_meta = log::Metadata::builder().level(level).target(target).build();
if logger.enabled(&log_meta) {
if let Some(ref inner) = self.inner {
logger.log(
&log::Record::builder()
.metadata(log_meta)
.module_path(meta.module_path())
.file(meta.file())
.line(meta.line())
.args(format_args!("{} span={}", message, inner.id.into_u64()))
.build(),
);
} else {
logger.log(
&log::Record::builder()
.metadata(log_meta)
.module_path(meta.module_path())
.file(meta.file())
.line(meta.line())
.args(message)
.build(),
);
}
}
}
}
}
/// Invokes a function with a reference to this span's ID and subscriber.
///
/// if this span is enabled, the provided function is called, and the result is returned.
/// If the span is disabled, the function is not called, and this method returns `None`
/// instead.
pub fn with_subscriber<T>(&self, f: impl FnOnce((&Id, &Dispatch)) -> T) -> Option<T> {
self.inner
.as_ref()
.map(|inner| f((&inner.id, &inner.subscriber)))
}
}
impl cmp::PartialEq for Span {
    // Two spans are equal when both carry metadata from the same callsite
    // and share the same `Inner`. The catch-all arm means spans without
    // metadata (e.g. `Span::none()`) never compare equal to anything.
    fn eq(&self, other: &Self) -> bool {
        match (&self.meta, &other.meta) {
            (Some(this), Some(that)) => {
                this.callsite() == that.callsite() && self.inner == other.inner
            }
            _ => false,
        }
    }
}
impl Hash for Span {
    // Hashing only the `inner` is consistent with `eq`: equal spans have
    // equal `inner`s, so they hash identically.
    fn hash<H: Hasher>(&self, hasher: &mut H) {
        self.inner.hash(hasher);
    }
}
impl fmt::Debug for Span {
    // Debug output includes callsite metadata (name/level/target plus the
    // optional module path, line, and file) and either the span's ID or a
    // `disabled` marker; spans without metadata print only `none: true`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut span = f.debug_struct("Span");
        if let Some(meta) = self.meta {
            span.field("name", &meta.name())
                .field("level", &meta.level())
                .field("target", &meta.target());
            if let Some(ref inner) = self.inner {
                span.field("id", &inner.id());
            } else {
                span.field("disabled", &true);
            }
            if let Some(ref path) = meta.module_path() {
                span.field("module_path", &path);
            }
            if let Some(ref line) = meta.line() {
                span.field("line", &line);
            }
            if let Some(ref file) = meta.file() {
                span.field("file", &file);
            }
        } else {
            span.field("none", &true);
        }
        span.finish()
    }
}
// Conversions from spans (and entered spans) to optional IDs. These back
// APIs such as `follows_from`, which accepts `impl Into<Option<Id>>`.
impl<'a> From<&'a Span> for Option<&'a Id> {
    fn from(span: &'a Span) -> Self {
        span.inner.as_ref().map(|inner| &inner.id)
    }
}
impl<'a> From<&'a Span> for Option<Id> {
    fn from(span: &'a Span) -> Self {
        span.inner.as_ref().map(Inner::id)
    }
}
impl From<Span> for Option<Id> {
    fn from(span: Span) -> Self {
        span.inner.as_ref().map(Inner::id)
    }
}
impl<'a> From<&'a EnteredSpan> for Option<&'a Id> {
    fn from(span: &'a EnteredSpan) -> Self {
        span.inner.as_ref().map(|inner| &inner.id)
    }
}
impl<'a> From<&'a EnteredSpan> for Option<Id> {
    fn from(span: &'a EnteredSpan) -> Self {
        span.inner.as_ref().map(Inner::id)
    }
}
impl Drop for Span {
    #[inline(always)]
    fn drop(&mut self) {
        // Notify the subscriber that this handle to the span ID is going
        // away; `try_close` lets the subscriber decide whether the span can
        // now be closed.
        if let Some(Inner {
            ref id,
            ref subscriber,
        }) = self.inner
        {
            subscriber.try_close(id.clone());
        }
        // Mirror the span's end-of-life to the `log` crate (no-op unless the
        // "log" feature is enabled).
        if_log_enabled! { crate::Level::TRACE, {
            if let Some(meta) = self.meta {
                self.log(
                    LIFECYCLE_LOG_TARGET,
                    log::Level::Trace,
                    format_args!("-- {};", meta.name()),
                );
            }
        }}
    }
}
// ===== impl Inner =====
impl Inner {
    /// Builds a new `Inner` from a span ID and the dispatcher that assigned
    /// it.
    fn new(id: Id, subscriber: &Dispatch) -> Self {
        Inner {
            id,
            subscriber: subscriber.clone(),
        }
    }

    /// Returns a clone of the span's ID.
    fn id(&self) -> Id {
        self.id.clone()
    }

    /// Forwards the recorded `values` to the subscriber for this span's ID.
    fn record(&self, values: &Record<'_>) {
        self.subscriber.record(&self.id, values)
    }

    /// Indicates that the span with the given ID has an indirect causal
    /// relationship with this span.
    ///
    /// Unlike the parent-child relationship, a span may have any number of
    /// prior spans, and is not considered to be executing _inside_ of the
    /// spans it follows from: a span may close while subsequent spans that
    /// follow from it are still open, and time spent inside a subsequent
    /// span is not attributed to its precedents. This models causal
    /// relationships such as a single future spawning several related
    /// background tasks.
    fn follows_from(&self, from: &Id) {
        self.subscriber.record_follows_from(&self.id, from)
    }
}
impl cmp::PartialEq for Inner {
    // Two `Inner`s are equal when they refer to the same span ID.
    fn eq(&self, other: &Self) -> bool {
        self.id == other.id
    }
}
impl Hash for Inner {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.id.hash(state);
    }
}
impl Clone for Inner {
    fn clone(&self) -> Self {
        Inner {
            // `clone_span` informs the subscriber that an additional handle
            // to this span now exists, so span-closing bookkeeping stays
            // accurate.
            id: self.subscriber.clone_span(&self.id),
            subscriber: self.subscriber.clone(),
        }
    }
}
// ===== impl Entered =====
impl EnteredSpan {
    /// Returns this span's `Id`, if it is enabled.
    pub fn id(&self) -> Option<Id> {
        self.inner.as_ref().map(Inner::id)
    }
    /// Exits this span, returning the underlying [`Span`].
    #[inline]
    pub fn exit(mut self) -> Span {
        // One does not simply move out of a struct with `Drop`.
        // Swapping in `Span::none()` leaves `self`'s own `Drop` harmless.
        let span = mem::replace(&mut self.span, Span::none());
        span.do_exit();
        span
    }
}
// An `EnteredSpan` derefs to its span, so `Span` methods are usable on it.
impl Deref for EnteredSpan {
    type Target = Span;
    #[inline]
    fn deref(&self) -> &Span {
        &self.span
    }
}
impl<'a> Drop for Entered<'a> {
    #[inline(always)]
    fn drop(&mut self) {
        // Exit the span when the guard is dropped.
        self.span.do_exit()
    }
}
impl Drop for EnteredSpan {
    #[inline(always)]
    fn drop(&mut self) {
        // Exit the span when the owned guard is dropped.
        self.span.do_exit()
    }
}
/// Technically, `EnteredSpan` _can_ implement both `Send` *and*
/// `Sync` safely. It doesn't, because it has a `PhantomNotSend` field,
/// specifically added in order to make it `!Send`.
///
/// Sending an `EnteredSpan` guard between threads cannot cause memory unsafety.
/// However, it *would* result in incorrect behavior, so we add a
/// `PhantomNotSend` to prevent it from being sent between threads. This is
/// because it must be *dropped* on the same thread that it was created;
/// otherwise, the span will never be exited on the thread where it was entered,
/// and it will attempt to exit the span on a thread that may never have entered
/// it. However, we still want them to be `Sync` so that a struct holding an
/// `Entered` guard can be `Sync`.
///
/// Thus, this is totally safe.
#[derive(Debug)]
struct PhantomNotSend {
    // `PhantomData<*mut ()>` makes the containing type `!Send`
    // automatically, since raw pointers are not `Send`.
    ghost: PhantomData<*mut ()>,
}
// Value-level constant with the same name as the type, so it can be used
// directly in struct literals.
#[allow(non_upper_case_globals)]
const PhantomNotSend: PhantomNotSend = PhantomNotSend { ghost: PhantomData };
/// # Safety
///
/// Trivially safe, as `PhantomNotSend` doesn't have any API.
unsafe impl Sync for PhantomNotSend {}
#[cfg(test)]
mod test {
    use super::*;
    // Compile-time assertions: these impls only compile if the span types
    // actually satisfy the auto-trait bounds (`Span` must be `Send + Sync`;
    // the entered guards must be `Sync` but, by design, not `Send`).
    trait AssertSend: Send {}
    impl AssertSend for Span {}
    trait AssertSync: Sync {}
    impl AssertSync for Span {}
    impl AssertSync for Entered<'_> {}
    impl AssertSync for EnteredSpan {}
}
| 35.311496 | 126 | 0.575489 |
48e8e530261bea39d8b1586d2f60cde8233ee65a | 273 | #![warn(clippy::single_component_path_imports)]
#![allow(unused_imports)]
use self::regex::{Regex as xeger, RegexSet as tesxeger};
pub use self::{
regex::{Regex, RegexSet},
some_mod::SomeType,
};
use regex;
mod some_mod {
pub struct SomeType;
}
fn main() {}
| 17.0625 | 56 | 0.681319 |
bf8a67238ceb90f4d19eddedc97b5058a9bebd55 | 8,253 | use super::{create, LinkerdInject};
use kube::ResourceExt;
use linkerd_policy_controller_k8s_api::{self as k8s};
use maplit::{btreemap, convert_args};
use tokio::time;
/// Creates and manages `curl` test pods in a single namespace.
#[derive(Clone)]
#[must_use]
pub struct Runner {
    // Namespace in which curl pods and their RBAC objects are created.
    namespace: String,
    client: kube::Client,
}
/// Handle to a curl pod that has been created by a [`Runner`].
#[derive(Clone)]
pub struct Running {
    namespace: String,
    // Name of the curl pod.
    name: String,
    client: kube::Client,
}
impl Runner {
    /// Creates a `Runner` bound to `ns` and installs the service account and
    /// RBAC objects that curl pods need.
    pub async fn init(client: &kube::Client, ns: &str) -> Runner {
        let runner = Runner {
            namespace: ns.to_string(),
            client: client.clone(),
        };
        runner.create_rbac().await;
        runner
    }
    /// Creates a configmap that prevents curl pods from executing.
    pub async fn create_lock(&self) {
        create(
            &self.client,
            k8s::api::core::v1::ConfigMap {
                metadata: k8s::ObjectMeta {
                    namespace: Some(self.namespace.clone()),
                    name: Some("curl-lock".to_string()),
                    ..Default::default()
                },
                ..Default::default()
            },
        )
        .await;
    }
    /// Deletes the lock configmap, allowing curl pods to execute.
    pub async fn delete_lock(&self) {
        tracing::trace!(ns = %self.namespace, "Deleting curl-lock");
        kube::Api::<k8s::api::core::v1::ConfigMap>::namespaced(
            self.client.clone(),
            &self.namespace,
        )
        .delete("curl-lock", &kube::api::DeleteParams::foreground())
        .await
        .expect("curl-lock must be deleted");
        tracing::debug!(ns = %self.namespace, "Deleted curl-lock");
    }
    /// Runs a [`k8s::Pod`] that runs curl against the provided target URL.
    ///
    /// The pod:
    /// - has the `linkerd.io/inject` annotation set, based on the
    ///   `linkerd_inject` parameter;
    /// - runs under the service account `curl`;
    /// - does not actually execute curl until the `curl-lock` configmap is not
    ///   present
    pub async fn run(&self, name: &str, target_url: &str, inject: LinkerdInject) -> Running {
        create(
            &self.client,
            Self::gen_pod(&self.namespace, name, target_url, inject),
        )
        .await;
        Running {
            client: self.client.clone(),
            namespace: self.namespace.clone(),
            name: name.to_string(),
        }
    }
    /// Creates a service account and RBAC to allow curl pods to watch the
    /// curl-lock configmap.
    async fn create_rbac(&self) {
        // Service account the curl pods run as.
        create(
            &self.client,
            k8s::api::core::v1::ServiceAccount {
                metadata: k8s::ObjectMeta {
                    namespace: Some(self.namespace.clone()),
                    name: Some("curl".to_string()),
                    ..Default::default()
                },
                ..Default::default()
            },
        )
        .await;
        // Role granting read/watch access to configmaps in the namespace.
        create(
            &self.client,
            k8s::api::rbac::v1::Role {
                metadata: k8s::ObjectMeta {
                    namespace: Some(self.namespace.clone()),
                    name: Some("curl-lock".to_string()),
                    ..Default::default()
                },
                rules: Some(vec![k8s::api::rbac::v1::PolicyRule {
                    api_groups: Some(vec!["".to_string()]),
                    resources: Some(vec!["configmaps".to_string()]),
                    verbs: vec!["get".to_string(), "list".to_string(), "watch".to_string()],
                    ..Default::default()
                }]),
            },
        )
        .await;
        // Bind the role to the `curl` service account.
        create(
            &self.client,
            k8s::api::rbac::v1::RoleBinding {
                metadata: k8s::ObjectMeta {
                    namespace: Some(self.namespace.clone()),
                    name: Some("curl-lock".to_string()),
                    ..Default::default()
                },
                role_ref: k8s::api::rbac::v1::RoleRef {
                    api_group: "rbac.authorization.k8s.io".to_string(),
                    kind: "Role".to_string(),
                    name: "curl-lock".to_string(),
                },
                subjects: Some(vec![k8s::api::rbac::v1::Subject {
                    kind: "ServiceAccount".to_string(),
                    name: "curl".to_string(),
                    namespace: Some(self.namespace.clone()),
                    ..Default::default()
                }]),
            },
        )
        .await;
    }
    /// Builds the curl pod spec: an init container that blocks until the
    /// `curl-lock` configmap is deleted, followed by the curl container.
    fn gen_pod(ns: &str, name: &str, target_url: &str, inject: LinkerdInject) -> k8s::Pod {
        k8s::Pod {
            metadata: k8s::ObjectMeta {
                namespace: Some(ns.to_string()),
                name: Some(name.to_string()),
                annotations: Some(convert_args!(btreemap!(
                    "linkerd.io/inject" => inject.to_string(),
                    "config.linkerd.io/proxy-log-level" => "linkerd=trace,info",
                ))),
                ..Default::default()
            },
            spec: Some(k8s::PodSpec {
                service_account: Some("curl".to_string()),
                // Gate execution on deletion of the curl-lock configmap.
                init_containers: Some(vec![k8s::api::core::v1::Container {
                    name: "wait-for-nginx".to_string(),
                    image: Some("docker.io/bitnami/kubectl:latest".to_string()),
                    args: Some(
                        vec![
                            "wait",
                            "--timeout=60s",
                            "--for=delete",
                            "--namespace",
                            ns,
                            "cm",
                            "curl-lock",
                        ]
                        .into_iter()
                        .map(Into::into)
                        .collect(),
                    ),
                    ..Default::default()
                }]),
                containers: vec![k8s::api::core::v1::Container {
                    name: "curl".to_string(),
                    image: Some("docker.io/curlimages/curl:latest".to_string()),
                    args: Some(
                        vec!["curl", "-sSfv", target_url]
                            .into_iter()
                            .map(Into::into)
                            .collect(),
                    ),
                    ..Default::default()
                }],
                // Run once; the exit code is read by `Running::exit_code`.
                restart_policy: Some("Never".to_string()),
                ..Default::default()
            }),
            ..k8s::Pod::default()
        }
    }
}
impl Running {
    /// Returns the curl pod's name.
    pub fn name(&self) -> &str {
        &self.name
    }
    /// Waits for the pod to have an IP address and returns it.
    pub async fn ip(&self) -> std::net::IpAddr {
        super::await_pod_ip(&self.client, &self.namespace, &self.name).await
    }
    /// Waits for the curl container to complete and returns its exit code.
    pub async fn exit_code(self) -> i32 {
        // Extracts the terminated curl container's exit code from a pod's
        // status, if it has finished.
        fn get_exit_code(pod: &k8s::Pod) -> Option<i32> {
            let c = pod
                .status
                .as_ref()?
                .container_statuses
                .as_ref()?
                .iter()
                .find(|c| c.name == "curl")?;
            let code = c.state.as_ref()?.terminated.as_ref()?.exit_code;
            tracing::debug!(ns = %pod.namespace().unwrap(), pod = %pod.name(), %code, "Curl exited");
            Some(code)
        }
        tracing::debug!(ns = %self.namespace, pod = %self.name, "Waiting for exit code");
        let api = kube::Api::namespaced(self.client.clone(), &self.namespace);
        // Watch the pod until the curl container reports a terminated state.
        let finished = kube::runtime::wait::await_condition(
            api.clone(),
            &self.name,
            |obj: Option<&k8s::Pod>| -> bool { obj.and_then(get_exit_code).is_some() },
        );
        // Panic (failing the test) if the pod does not finish within 60s.
        match time::timeout(time::Duration::from_secs(60), finished).await {
            Ok(Ok(())) => {}
            Ok(Err(error)) => panic!("Failed to wait for exit code: {}: {}", self.name, error),
            Err(_timeout) => panic!("Timeout waiting for exit code: {}", self.name),
        };
        let curl_pod = api.get(&self.name).await.expect("pod must exist");
        get_exit_code(&curl_pod).expect("curl pod must have an exit code")
    }
}
| 35.573276 | 101 | 0.469163 |
fec1e561809993d3f5283a627b58f431cf078e94 | 2,205 | #[derive(Debug)]
struct Character {
class: String,
hp: u16,
mana: u16
}
impl Character {
// Create a new Char struct
fn new(class: &str, hp: u16, mana: u16) -> Character {
Character {
class: class.to_string(),
hp: hp,
mana: mana
}
}
// calculate total product of hp and mana
fn total_power(&self) -> u16 {
self.hp * self.mana
}
// How many hits char can take base on opponents attack
fn maximum_hits(&self, attack: u16) -> u16 {
self.hp / attack
}
fn take_hit(&mut self, damage: u16) {
if damage >= self.hp {
self.hp = 0;
} else if damage < self.hp {
self.hp -= damage;
}
}
fn cast_magic(&mut self, spell: u16) {
if spell == self.mana {
self.mana = 0;
} else if spell < self.mana {
self.mana -= spell
} else {
println!("Not enough mana to cast that spell!");
}
}
fn drink_health_potion(&mut self, potion: u16) {
self.hp += potion;
if self.hp > 100 {
self.hp = 100;
}
}
fn drink_mana_potion(&mut self, potion: u16) {
self.mana += potion;
if self.mana > 100 {
self.mana = 100;
}
}
}
pub fn run() {
// Create Char
let mut npc = Character::new("Rogue", 100, 80);
println!("Character: {:?}", npc);
println!("Class: {} with HP: {} and MANA: {}", npc.class, npc.hp, npc.mana);
// Hit npc
let hit: u16 = 25;
npc.take_hit(hit);
println!("npc HP after taking {} hit is {}", hit, npc.hp);
// Drink health potion
let small_health_potion: u16 = 20;
npc.drink_health_potion(small_health_potion);
println!("HP after drinka a potion: {}", npc.hp);
// Npc hits back with magic spell and then drinks potion
let spell: u16 = 30;
npc.cast_magic(spell);
println!("Mana after casting spell: {}", npc.mana);
// Power of a npc
let total = npc.total_power();
println!("total power of an npc is: {}", total);
npc.drink_mana_potion(55);
println!("Refilled mana and we have: {} mana", npc.mana);
} | 24.230769 | 80 | 0.535147 |
79b340825059080b8ae1f3eafe6dc4c0efab3d31 | 8,866 | //! The abstract **Material** class defines the interface that
//! material implementations must provide.
//std
use serde::{Deserialize, Serialize};
use std::cell::Cell;
use std::sync::Arc;
// pbrt
use crate::core::geometry::vec3_cross_vec3;
use crate::core::geometry::{Normal3f, Vector2f, Vector3f};
use crate::core::interaction::SurfaceInteraction;
use crate::core::pbrt::{Float, Spectrum};
use crate::core::texture::Texture;
use crate::materials::disney::DisneyMaterial;
use crate::materials::fourier::FourierMaterial;
use crate::materials::glass::GlassMaterial;
use crate::materials::hair::HairMaterial;
use crate::materials::matte::MatteMaterial;
use crate::materials::metal::MetalMaterial;
use crate::materials::mirror::MirrorMaterial;
use crate::materials::mixmat::MixMaterial;
use crate::materials::plastic::PlasticMaterial;
use crate::materials::substrate::SubstrateMaterial;
use crate::materials::subsurface::SubsurfaceMaterial;
use crate::materials::translucent::TranslucentMaterial;
use crate::materials::uber::UberMaterial;
// see material.h
/// Is used to inform non-symmetric BSDFs about the transported
/// quantity so that they can correctly switch between the adjoint and
/// non-adjoint forms.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum TransportMode {
    /// The transported quantity is radiance.
    Radiance,
    /// The transported quantity is importance.
    Importance,
}
/// All supported material implementations; each variant boxes its concrete
/// type so the enum itself stays small.
#[derive(Serialize, Deserialize)]
pub enum Material {
    Disney(Box<DisneyMaterial>),
    Fourier(Box<FourierMaterial>),
    Glass(Box<GlassMaterial>),
    Hair(Box<HairMaterial>),
    Matte(Box<MatteMaterial>),
    Metal(Box<MetalMaterial>),
    Mirror(Box<MirrorMaterial>),
    Mix(Box<MixMaterial>),
    Plastic(Box<PlasticMaterial>),
    Substrate(Box<SubstrateMaterial>),
    Subsurface(Box<SubsurfaceMaterial>),
    Translucent(Box<TranslucentMaterial>),
    Uber(Box<UberMaterial>),
}
/// **Material** defines the interface that material implementations
/// must provide.
impl Material {
    /// The method is given a **SurfaceInteraction** object that
    /// contains geometric properties at an intersection point on the
    /// surface of a shape and is responsible for determining the
    /// reflective properties at the point and initializing some
    /// member variables.
    pub fn compute_scattering_functions(
        &self,
        si: &mut SurfaceInteraction,
        // arena: &mut Arena,
        mode: TransportMode,
        allow_multiple_lobes: bool,
        mat: Option<Arc<Material>>,
        scale: Option<Spectrum>,
    ) {
        // Dispatch to the concrete material's implementation.
        match self {
            Material::Disney(material) => {
                material.compute_scattering_functions(si, mode, allow_multiple_lobes, mat, scale)
            }
            Material::Fourier(material) => {
                material.compute_scattering_functions(si, mode, allow_multiple_lobes, mat, scale)
            }
            Material::Glass(material) => {
                material.compute_scattering_functions(si, mode, allow_multiple_lobes, mat, scale)
            }
            Material::Hair(material) => {
                material.compute_scattering_functions(si, mode, allow_multiple_lobes, mat, scale)
            }
            Material::Matte(material) => {
                material.compute_scattering_functions(si, mode, allow_multiple_lobes, mat, scale)
            }
            Material::Metal(material) => {
                material.compute_scattering_functions(si, mode, allow_multiple_lobes, mat, scale)
            }
            Material::Mirror(material) => {
                material.compute_scattering_functions(si, mode, allow_multiple_lobes, mat, scale)
            }
            Material::Mix(material) => {
                material.compute_scattering_functions(si, mode, allow_multiple_lobes, mat, scale)
            }
            Material::Plastic(material) => {
                material.compute_scattering_functions(si, mode, allow_multiple_lobes, mat, scale)
            }
            Material::Substrate(material) => {
                material.compute_scattering_functions(si, mode, allow_multiple_lobes, mat, scale)
            }
            Material::Subsurface(material) => {
                material.compute_scattering_functions(si, mode, allow_multiple_lobes, mat, scale)
            }
            Material::Translucent(material) => {
                material.compute_scattering_functions(si, mode, allow_multiple_lobes, mat, scale)
            }
            Material::Uber(material) => {
                material.compute_scattering_functions(si, mode, allow_multiple_lobes, mat, scale)
            }
        }
    }
    /// Computing the effect of bump mapping at the point being shaded
    /// given a particular displacement texture.
    ///
    /// Evaluates the displacement texture `d` at positions offset in `u` and
    /// `v`, then rebuilds the shading geometry of `si` from the resulting
    /// forward differences.
    pub fn bump(d: &Arc<Texture<Float>>, si: &mut SurfaceInteraction)
    where
        Self: Sized,
    {
        // compute offset positions and evaluate displacement texture:
        // build a scratch copy of the interaction that can be shifted.
        let mut si_eval: SurfaceInteraction = SurfaceInteraction::default();
        si_eval.common.p = si.common.p;
        si_eval.common.time = si.common.time;
        si_eval.common.p_error = si.common.p_error;
        si_eval.common.wo = si.common.wo;
        si_eval.common.n = si.common.n;
        if let Some(ref medium_interface) = si.common.medium_interface {
            si_eval.common.medium_interface = Some(medium_interface.clone());
        } else {
            si_eval.common.medium_interface = None;
        }
        si_eval.uv = si.uv;
        si_eval.dpdu = si.dpdu;
        si_eval.dpdv = si.dpdv;
        si_eval.dndu = si.dndu;
        si_eval.dndv = si.dndv;
        si_eval.dudx = Cell::new(si.dudx.get());
        si_eval.dvdx = Cell::new(si.dvdx.get());
        si_eval.dudy = Cell::new(si.dudy.get());
        si_eval.dvdy = Cell::new(si.dvdy.get());
        si_eval.dpdx = Cell::new(si.dpdx.get());
        si_eval.dpdy = Cell::new(si.dpdy.get());
        // NOTE(review): the `Some` arm below creates an `Arc` and discards it
        // without assigning `si_eval.primitive` — this looks like an
        // incomplete copy; confirm intent against upstream pbrt.
        if let Some(primitive) = &si.primitive {
            Arc::new(*primitive);
        } else {
            si_eval.primitive = None
        }
        si_eval.shading.n = si.shading.n;
        si_eval.shading.dpdu = si.shading.dpdu;
        si_eval.shading.dpdv = si.shading.dpdv;
        si_eval.shading.dndu = si.shading.dndu;
        si_eval.shading.dndv = si.shading.dndv;
        // NOTE(review): same discarded-`Arc` pattern as `primitive` above.
        if let Some(bsdf) = &si.bsdf {
            Arc::new(bsdf.clone());
        } else {
            si_eval.bsdf = None
        }
        // if let Some(bssrdf) = &si.bssrdf {
        //     Some(Arc::new(bssrdf.clone()));
        // } else {
        //     si_eval.bssrdf = None
        // }
        // NOTE(review): same discarded-`Arc` pattern as `primitive` above.
        if let Some(shape) = &si.shape {
            Arc::new(shape);
        } else {
            si_eval.shape = None
        }
        // shift _si_eval_ _du_ in the $u$ direction
        let mut du: Float = 0.5 as Float * (si.dudx.get().abs() + si.dudy.get().abs());
        // The most common reason for du to be zero is for ray that start from
        // light sources, where no differentials are available. In this case,
        // we try to choose a small enough du so that we still get a decently
        // accurate bump value.
        if du == 0.0 as Float {
            du = 0.0005 as Float;
        }
        {
            si_eval.common.p = si.common.p + si.shading.dpdu * du;
            si_eval.uv = si.uv
                + Vector2f {
                    x: du,
                    y: 0.0 as Float,
                };
            si_eval.common.n =
                (Normal3f::from(vec3_cross_vec3(&si.shading.dpdu, &si.shading.dpdv))
                    + si.dndu * du)
                    .normalize();
        }
        let u_displace: Float = d.evaluate(&si_eval);
        // shift _si_eval_ _dv_ in the $v$ direction
        let mut dv: Float = 0.5 as Float * (si.dvdx.get().abs() + si.dvdy.get().abs());
        // NOTE(review): `00 as Float` evaluates to 0.0, but was almost
        // certainly meant to read `0.0` — compare the `du` branch above.
        if dv == 00 as Float {
            dv = 0.0005 as Float;
        }
        {
            si_eval.common.p = si.common.p + si.shading.dpdv * dv;
            si_eval.uv = si.uv
                + Vector2f {
                    x: 0.0 as Float,
                    y: dv,
                };
            si_eval.common.n =
                (Normal3f::from(vec3_cross_vec3(&si.shading.dpdu, &si.shading.dpdv))
                    + si.dndv * dv)
                    .normalize();
        }
        let v_displace: Float = d.evaluate(&si_eval);
        let displace: Float = d.evaluate(&si);
        // compute bump-mapped differential geometry from the forward
        // differences of the displacement in u and v.
        let dpdu: Vector3f = si.shading.dpdu
            + Vector3f::from(si.shading.n) * ((u_displace - displace) / du)
            + Vector3f::from(si.shading.dndu) * displace;
        let dpdv: Vector3f = si.shading.dpdv
            + Vector3f::from(si.shading.n) * ((v_displace - displace) / dv)
            + Vector3f::from(si.shading.dndv) * displace;
        let dndu = si.shading.dndu;
        let dndv = si.shading.dndv;
        si.set_shading_geometry(&dpdu, &dpdv, &dndu, &dndv, false);
    }
}
| 39.757848 | 97 | 0.596887 |
ac30093ba826015fba0f9859172857b2f2d3861b | 13,972 | use rustc_errors::DiagnosticBuilder;
use rustc_infer::infer::canonical::Canonical;
use rustc_infer::infer::error_reporting::nice_region_error::NiceRegionError;
use rustc_infer::infer::region_constraints::Constraint;
use rustc_infer::infer::{InferCtxt, RegionResolutionError, SubregionOrigin, TyCtxtInferExt as _};
use rustc_infer::traits::{Normalized, ObligationCause, TraitEngine, TraitEngineExt};
use rustc_middle::ty::error::TypeError;
use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc_span::Span;
use rustc_trait_selection::traits::query::type_op;
use rustc_trait_selection::traits::{SelectionContext, TraitEngineExt as _};
use rustc_traits::{type_op_ascribe_user_type_with_span, type_op_prove_predicate_with_span};
use std::fmt;
use std::rc::Rc;
use crate::borrow_check::region_infer::values::RegionElement;
use crate::borrow_check::MirBorrowckCtxt;
/// Records why a placeholder universe was created, so region errors in that
/// universe can be reported with an appropriate diagnostic.
#[derive(Clone)]
crate struct UniverseInfo<'tcx>(UniverseInfoInner<'tcx>);
/// What operation a universe was created for.
#[derive(Clone)]
enum UniverseInfoInner<'tcx> {
    /// Relating two types which have binders.
    RelateTys { expected: Ty<'tcx>, found: Ty<'tcx> },
    /// Created from performing a `TypeOp`.
    TypeOp(Rc<dyn TypeOpInfo<'tcx> + 'tcx>),
    /// Any other reason.
    Other,
}
impl UniverseInfo<'tcx> {
    /// A universe with no recorded provenance.
    crate fn other() -> UniverseInfo<'tcx> {
        UniverseInfo(UniverseInfoInner::Other)
    }
    /// A universe created while relating `expected` and `found`.
    crate fn relate(expected: Ty<'tcx>, found: Ty<'tcx>) -> UniverseInfo<'tcx> {
        UniverseInfo(UniverseInfoInner::RelateTys { expected, found })
    }
    /// Buffers a diagnostic for a region error involving `placeholder`,
    /// tailored to the operation that created the universe.
    crate fn report_error(
        &self,
        mbcx: &mut MirBorrowckCtxt<'_, 'tcx>,
        placeholder: ty::PlaceholderRegion,
        error_element: RegionElement,
        span: Span,
    ) {
        match self.0 {
            UniverseInfoInner::RelateTys { expected, found } => {
                let body_id = mbcx.infcx.tcx.hir().local_def_id_to_hir_id(mbcx.mir_def_id());
                let err = mbcx.infcx.report_mismatched_types(
                    &ObligationCause::misc(span, body_id),
                    expected,
                    found,
                    TypeError::RegionsPlaceholderMismatch,
                );
                err.buffer(&mut mbcx.errors_buffer);
            }
            UniverseInfoInner::TypeOp(ref type_op_info) => {
                // Delegate to the type-op-specific reporting logic.
                type_op_info.report_error(mbcx, placeholder, error_element, span);
            }
            UniverseInfoInner::Other => {
                // FIXME: This error message isn't great, but it doesn't show
                // up in the existing UI tests. Consider investigating this
                // some more.
                mbcx.infcx
                    .tcx
                    .sess
                    .struct_span_err(span, "higher-ranked subtype error")
                    .buffer(&mut mbcx.errors_buffer);
            }
        }
    }
}
/// Converts a canonicalized query into a [`UniverseInfo`], recording the
/// universe index that existed before the query introduced new universes.
crate trait ToUniverseInfo<'tcx> {
    fn to_universe_info(self, base_universe: ty::UniverseIndex) -> UniverseInfo<'tcx>;
}
// Each supported canonical query kind wraps itself in the matching
// `TypeOpInfo` implementation so the query can be re-run for diagnostics.
impl<'tcx> ToUniverseInfo<'tcx>
    for Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::prove_predicate::ProvePredicate<'tcx>>>
{
    fn to_universe_info(self, base_universe: ty::UniverseIndex) -> UniverseInfo<'tcx> {
        UniverseInfo(UniverseInfoInner::TypeOp(Rc::new(PredicateQuery {
            canonical_query: self,
            base_universe,
        })))
    }
}
impl<'tcx, T: Copy + fmt::Display + TypeFoldable<'tcx> + 'tcx> ToUniverseInfo<'tcx>
    for Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::Normalize<T>>>
{
    fn to_universe_info(self, base_universe: ty::UniverseIndex) -> UniverseInfo<'tcx> {
        UniverseInfo(UniverseInfoInner::TypeOp(Rc::new(NormalizeQuery {
            canonical_query: self,
            base_universe,
        })))
    }
}
impl<'tcx> ToUniverseInfo<'tcx>
    for Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::AscribeUserType<'tcx>>>
{
    fn to_universe_info(self, base_universe: ty::UniverseIndex) -> UniverseInfo<'tcx> {
        UniverseInfo(UniverseInfoInner::TypeOp(Rc::new(AscribeUserTypeQuery {
            canonical_query: self,
            base_universe,
        })))
    }
}
impl<'tcx, F, G> ToUniverseInfo<'tcx> for Canonical<'tcx, type_op::custom::CustomTypeOp<F, G>> {
    fn to_universe_info(self, _base_universe: ty::UniverseIndex) -> UniverseInfo<'tcx> {
        // We can't rerun custom type ops.
        UniverseInfo::other()
    }
}
#[allow(unused_lifetimes)]
trait TypeOpInfo<'tcx> {
    /// Returns an error to be reported if rerunning the type op fails to
    /// recover the error's cause.
    fn fallback_error(&self, tcx: TyCtxt<'tcx>, span: Span) -> DiagnosticBuilder<'tcx>;
    /// The universe index that existed before this type op ran.
    fn base_universe(&self) -> ty::UniverseIndex;
    /// Re-runs the type op to produce a better diagnostic, if possible.
    fn nice_error(
        &self,
        tcx: TyCtxt<'tcx>,
        span: Span,
        placeholder_region: ty::Region<'tcx>,
        error_region: Option<ty::Region<'tcx>>,
    ) -> Option<DiagnosticBuilder<'tcx>>;
    fn report_error(
        &self,
        mbcx: &mut MirBorrowckCtxt<'_, 'tcx>,
        placeholder: ty::PlaceholderRegion,
        error_element: RegionElement,
        span: Span,
    ) {
        let tcx = mbcx.infcx.tcx;
        let base_universe = self.base_universe();
        // Re-express the placeholder's universe relative to the base
        // universe; if it predates the type op, we cannot map it back and
        // must fall back to the generic error.
        let adjusted_universe = if let Some(adjusted) =
            placeholder.universe.as_u32().checked_sub(base_universe.as_u32())
        {
            adjusted
        } else {
            self.fallback_error(tcx, span).buffer(&mut mbcx.errors_buffer);
            return;
        };
        let placeholder_region = tcx.mk_region(ty::RePlaceholder(ty::Placeholder {
            name: placeholder.name,
            universe: adjusted_universe.into(),
        }));
        // Likewise adjust the error element, when it is itself a placeholder.
        let error_region =
            if let RegionElement::PlaceholderRegion(error_placeholder) = error_element {
                let adjusted_universe =
                    error_placeholder.universe.as_u32().checked_sub(base_universe.as_u32());
                adjusted_universe.map(|adjusted| {
                    tcx.mk_region(ty::RePlaceholder(ty::Placeholder {
                        name: error_placeholder.name,
                        universe: adjusted.into(),
                    }))
                })
            } else {
                None
            };
        debug!(?placeholder_region);
        // Prefer the specific diagnostic from re-running the type op; fall
        // back to the generic higher-ranked error otherwise.
        let nice_error = self.nice_error(tcx, span, placeholder_region, error_region);
        if let Some(nice_error) = nice_error {
            nice_error.buffer(&mut mbcx.errors_buffer);
        } else {
            self.fallback_error(tcx, span).buffer(&mut mbcx.errors_buffer);
        }
    }
}
/// A canonicalized `ProvePredicate` type op, retained so it can be re-run
/// for diagnostics.
struct PredicateQuery<'tcx> {
    canonical_query:
        Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::prove_predicate::ProvePredicate<'tcx>>>,
    base_universe: ty::UniverseIndex,
}
impl TypeOpInfo<'tcx> for PredicateQuery<'tcx> {
    fn fallback_error(&self, tcx: TyCtxt<'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        let mut err = tcx.sess.struct_span_err(span, "higher-ranked lifetime error");
        err.note(&format!("could not prove {}", self.canonical_query.value.value.predicate));
        err
    }
    fn base_universe(&self) -> ty::UniverseIndex {
        self.base_universe
    }
    fn nice_error(
        &self,
        tcx: TyCtxt<'tcx>,
        span: Span,
        placeholder_region: ty::Region<'tcx>,
        error_region: Option<ty::Region<'tcx>>,
    ) -> Option<DiagnosticBuilder<'tcx>> {
        // Re-run the predicate proof in a fresh inference context and mine
        // the resulting region constraints for a better diagnostic.
        tcx.infer_ctxt().enter_with_canonical(span, &self.canonical_query, |ref infcx, key, _| {
            let mut fulfill_cx = <dyn TraitEngine<'_>>::new(tcx);
            type_op_prove_predicate_with_span(infcx, &mut *fulfill_cx, key, Some(span));
            try_extract_error_from_fulfill_cx(fulfill_cx, infcx, placeholder_region, error_region)
        })
    }
}
/// A canonicalized `Normalize` type op, retained so it can be re-run for
/// diagnostics.
struct NormalizeQuery<'tcx, T> {
    canonical_query: Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::Normalize<T>>>,
    base_universe: ty::UniverseIndex,
}
impl<T> TypeOpInfo<'tcx> for NormalizeQuery<'tcx, T>
where
    T: Copy + fmt::Display + TypeFoldable<'tcx> + 'tcx,
{
    fn fallback_error(&self, tcx: TyCtxt<'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        let mut err = tcx.sess.struct_span_err(span, "higher-ranked lifetime error");
        err.note(&format!("could not normalize `{}`", self.canonical_query.value.value.value));
        err
    }
    fn base_universe(&self) -> ty::UniverseIndex {
        self.base_universe
    }
    fn nice_error(
        &self,
        tcx: TyCtxt<'tcx>,
        span: Span,
        placeholder_region: ty::Region<'tcx>,
        error_region: Option<ty::Region<'tcx>>,
    ) -> Option<DiagnosticBuilder<'tcx>> {
        tcx.infer_ctxt().enter_with_canonical(span, &self.canonical_query, |ref infcx, key, _| {
            let mut fulfill_cx = <dyn TraitEngine<'_>>::new(tcx);
            let mut selcx = SelectionContext::new(infcx);
            // FIXME(lqd): Unify and de-duplicate the following with the actual
            // `rustc_traits::type_op::type_op_normalize` query to allow the span we need in the
            // `ObligationCause`. The normalization results are currently different between
            // `AtExt::normalize` used in the query and `normalize` called below: the former fails
            // to normalize the `nll/relate_tys/impl-fn-ignore-binder-via-bottom.rs` test. Check
            // after #85499 lands to see if its fixes have erased this difference.
            let (param_env, value) = key.into_parts();
            let Normalized { value: _, obligations } = rustc_trait_selection::traits::normalize(
                &mut selcx,
                param_env,
                ObligationCause::dummy_with_span(span),
                value.value,
            );
            fulfill_cx.register_predicate_obligations(infcx, obligations);
            try_extract_error_from_fulfill_cx(fulfill_cx, infcx, placeholder_region, error_region)
        })
    }
}
/// Canonicalized `AscribeUserType` type op plus the universe index the borrow
/// checker was in when the query was issued.
struct AscribeUserTypeQuery<'tcx> {
    canonical_query: Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::AscribeUserType<'tcx>>>,
    base_universe: ty::UniverseIndex,
}
impl TypeOpInfo<'tcx> for AscribeUserTypeQuery<'tcx> {
    /// Generic diagnostic used when `nice_error` cannot produce anything better.
    fn fallback_error(&self, tcx: TyCtxt<'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        // FIXME: This error message isn't great, but it doesn't show up in the existing UI tests,
        // and is only the fallback when the nice error fails. Consider improving this some more.
        tcx.sess.struct_span_err(span, "higher-ranked lifetime error")
    }
    /// Universe index recorded when the canonical inputs were instantiated.
    fn base_universe(&self) -> ty::UniverseIndex {
        self.base_universe
    }
    /// Re-runs the user-type ascription in a fresh inference context so the
    /// failing obligation can be attributed to `span`.
    fn nice_error(
        &self,
        tcx: TyCtxt<'tcx>,
        span: Span,
        placeholder_region: ty::Region<'tcx>,
        error_region: Option<ty::Region<'tcx>>,
    ) -> Option<DiagnosticBuilder<'tcx>> {
        tcx.infer_ctxt().enter_with_canonical(span, &self.canonical_query, |ref infcx, key, _| {
            let mut fulfill_cx = <dyn TraitEngine<'_>>::new(tcx);
            // If the ascription itself fails we have nothing to report here.
            type_op_ascribe_user_type_with_span(infcx, &mut *fulfill_cx, key, Some(span)).ok()?;
            try_extract_error_from_fulfill_cx(fulfill_cx, infcx, placeholder_region, error_region)
        })
    }
}
/// Drives the fulfillment context to quiescence, then scans the recorded
/// region constraints for one that mentions `placeholder_region` and converts
/// it into a `NiceRegionError` diagnostic when possible.
fn try_extract_error_from_fulfill_cx<'tcx>(
    mut fulfill_cx: Box<dyn TraitEngine<'tcx> + 'tcx>,
    infcx: &InferCtxt<'_, 'tcx>,
    placeholder_region: ty::Region<'tcx>,
    error_region: Option<ty::Region<'tcx>>,
) -> Option<DiagnosticBuilder<'tcx>> {
    let tcx = infcx.tcx;
    // We generally shouldn't have errors here because the query was
    // already run, but there's no point using `delay_span_bug`
    // when we're going to emit an error here anyway.
    let _errors = fulfill_cx.select_all_or_error(infcx).err().unwrap_or_else(Vec::new);
    // Find the first constraint that involves the placeholder region, along
    // with the origin (cause) the inference engine recorded for it.
    let (sub_region, cause) = infcx.with_region_constraints(|region_constraints| {
        debug!(?region_constraints);
        region_constraints.constraints.iter().find_map(|(constraint, cause)| {
            match *constraint {
                Constraint::RegSubReg(sub, sup) if sup == placeholder_region && sup != sub => {
                    Some((sub, cause.clone()))
                }
                // FIXME: Should this check the universe of the var?
                Constraint::VarSubReg(vid, sup) if sup == placeholder_region => {
                    Some((tcx.mk_region(ty::ReVar(vid)), cause.clone()))
                }
                _ => None,
            }
        })
    })?;
    debug!(?sub_region, ?cause);
    // Pick the most specific resolution-error shape we can express from the
    // combination of (known error region?, kind of sub region).
    let nice_error = match (error_region, sub_region) {
        (Some(error_region), &ty::ReVar(vid)) => NiceRegionError::new(
            infcx,
            RegionResolutionError::SubSupConflict(
                vid,
                infcx.region_var_origin(vid),
                cause.clone(),
                error_region,
                cause.clone(),
                placeholder_region,
            ),
        ),
        (Some(error_region), _) => NiceRegionError::new(
            infcx,
            RegionResolutionError::ConcreteFailure(cause.clone(), error_region, placeholder_region),
        ),
        // Note universe here is wrong...
        (None, &ty::ReVar(vid)) => NiceRegionError::new(
            infcx,
            RegionResolutionError::UpperBoundUniverseConflict(
                vid,
                infcx.region_var_origin(vid),
                infcx.universe_of_region(sub_region),
                cause.clone(),
                placeholder_region,
            ),
        ),
        (None, _) => NiceRegionError::new(
            infcx,
            RegionResolutionError::ConcreteFailure(cause.clone(), sub_region, placeholder_region),
        ),
    };
    // Fall back to a plain placeholder-mismatch type error if the nice
    // reporting path declines.
    nice_error.try_report_from_nll().or_else(|| {
        if let SubregionOrigin::Subtype(trace) = cause {
            Some(
                infcx.report_and_explain_type_error(*trace, &TypeError::RegionsPlaceholderMismatch),
            )
        } else {
            None
        }
    })
}
| 37.358289 | 100 | 0.613871 |
79d091645b2d7c3c8264f46250bd5ba7be54a97b | 6,012 | #[derive(Clone)]
#[allow(non_camel_case_types)]
/// Sniprun interpreter for Bash/shell snippets.
/// NOTE(review): the `#[derive(Clone)]` for this type sits on the preceding line.
pub struct Bash_original {
    /// How much of the buffer this interpreter supports (line, bloc, ...).
    support_level: SupportLevel,
    /// Editor/session state handed over by sniprun.
    data: DataHolder,
    /// The snippet to execute, filled by `fetch_code`.
    code: String,
    /// Dedicated scratch directory for this interpreter.
    bash_work_dir: String,
    /// Path of the generated `main.sh` script inside the work dir.
    main_file_path: String,
}
impl Interpreter for Bash_original {
    // Creates the interpreter and prepares its scratch directory
    // (`<work_dir>/bash-original`) plus the path of the script it will run.
    fn new_with_level(data: DataHolder, level: SupportLevel) -> Box<Bash_original> {
        let bwd = data.work_dir.clone() + "/bash-original";
        let mut builder = DirBuilder::new();
        builder.recursive(true);
        builder
            .create(&bwd)
            .expect("Could not create directory for bash-original");
        let mfp = bwd.clone() + "/main.sh";
        Box::new(Bash_original {
            data,
            support_level: level,
            code: String::from(""),
            bash_work_dir: bwd,
            main_file_path: mfp,
        })
    }
    fn get_name() -> String {
        String::from("Bash_original")
    }
    // Carry state between runs (REPL-like behaviour) by default.
    fn behave_repl_like_default() -> bool {
        true
    }
    fn has_repl_capability() -> bool {
        true
    }
    // This is the default interpreter for shell filetypes.
    fn default_for_filetype() -> bool {
        true
    }
    fn check_cli_args(&self) -> Result<(), SniprunError> {
        // All cli arguments are sendable to bash
        // Though they will be ignored in REPL mode
        Ok(())
    }
    fn get_supported_languages() -> Vec<String> {
        vec![
            String::from("Bash / Shell"),
            String::from("bash"),
            String::from("shell"),
            String::from("sh"),
        ]
    }
    fn get_current_level(&self) -> SupportLevel {
        self.support_level
    }
    fn set_current_level(&mut self, level: SupportLevel) {
        self.support_level = level;
    }
    fn get_data(&self) -> DataHolder {
        self.data.clone()
    }
    fn get_max_support_level() -> SupportLevel {
        SupportLevel::Bloc
    }
    // Selects the snippet to run: prefer the visual bloc if it contains
    // anything beyond whitespace, otherwise fall back to the current line.
    fn fetch_code(&mut self) -> Result<(), SniprunError> {
        if !self
            .data
            .current_bloc
            .replace(&[' ', '\t', '\n', '\r'][..], "")
            .is_empty()
            && self.get_current_level() >= SupportLevel::Bloc
        {
            self.code = self.data.current_bloc.clone();
        } else if !self.data.current_line.replace(" ", "").is_empty()
            && self.get_current_level() >= SupportLevel::Line
        {
            self.code = self.data.current_line.clone();
        } else {
            self.code = String::from("");
        }
        Ok(())
    }
    fn add_boilerplate(&mut self) -> Result<(), SniprunError> {
        //add shebang just in case
        self.code = String::from("#!/usr/bin/env bash \n") + &self.code;
        Ok(())
    }
    // "Building" bash just means writing the script to disk.
    fn build(&mut self) -> Result<(), SniprunError> {
        let mut _file =
            File::create(&self.main_file_path).expect("Failed to create file for bash-original");
        write(&self.main_file_path, &self.code).expect("Unable to write to file for bash-original");
        Ok(())
    }
    // Runs the script with bash, forwarding any CLI args; stdout on success,
    // stderr wrapped in a RuntimeError otherwise.
    fn execute(&mut self) -> Result<String, SniprunError> {
        let output = Command::new("bash")
            .arg(&self.main_file_path)
            .args(&self.get_data().cli_args)
            .output()
            .expect("Unable to start process");
        if output.status.success() {
            Ok(String::from_utf8(output.stdout).unwrap())
        } else {
            Err(SniprunError::RuntimeError(
                String::from_utf8(output.stderr).unwrap(),
            ))
        }
    }
}
impl ReplLikeInterpreter for Bash_original {
    // REPL mode: prepend the code saved from previous runs so state
    // (variables, functions) carries over between snippets.
    fn fetch_code_repl(&mut self) -> Result<(), SniprunError> {
        self.fetch_code()?;
        let previous_code = self.read_previous_code();
        self.code = previous_code + "\n" + &self.code;
        Ok(())
    }
    fn add_boilerplate_repl(&mut self) -> Result<(), SniprunError> {
        self.add_boilerplate()
    }
    fn build_repl(&mut self) -> Result<(), SniprunError> {
        self.build()
    }
    fn execute_repl(&mut self) -> Result<String, SniprunError> {
        // Removes top-level print statements before saving the snippet, so
        // re-running the accumulated history doesn't re-print old output.
        // NOTE(review): "print " is matched alongside "echo " — presumably to
        // catch printf-style helpers; confirm intent.
        fn strip_prints(code: &str) -> String {
            let mut striped_code = String::new();
            let print_statements = vec!["echo ", "print "];
            // Tracks `{`/`}` nesting depth; prints inside a block are kept.
            let mut count = 0i64;
            for line in code.lines() {
                //basic splitting only
                let opening_bracket = line.matches('{').count() as i64;
                let closing_bracket = line.matches('}').count() as i64;
                count += opening_bracket - closing_bracket;
                if count <= 0 {
                    if print_statements.iter().all(|ps| !line.contains(ps)) {
                        // does not contains any print statement
                        striped_code.push_str(line);
                        striped_code.push('\n');
                    }
                } else {
                    striped_code.push_str(line);
                    striped_code.push('\n');
                }
            }
            striped_code
        }
        let res = self.execute();
        // Only persist the snippet into the session history if it ran cleanly.
        if res.is_ok() {
            let _ = self.fetch_code();
            self.save_code(strip_prints(&self.code));
        }
        info!("executed as repl");
        res
    }
}
#[cfg(test)]
mod test_bash_original {
    use super::*;
    use serial_test::serial;
    // Tests are serialized because they share the on-disk work directory.
    #[test]
    #[serial(bash)]
    fn simple_print() {
        let mut data = DataHolder::new();
        data.current_bloc = String::from("A=2 && echo $A");
        // `new` is presumably a trait-provided constructor wrapping
        // `new_with_level` — confirm against the Interpreter trait.
        let mut interpreter = Bash_original::new(data);
        let res = interpreter.run();
        // should panic if not an Ok()
        let string_result = res.unwrap();
        assert_eq!(string_result, "2\n");
    }
    #[test]
    #[serial(bash)]
    fn block_things() {
        let mut data = DataHolder::new();
        data.current_bloc = String::from("A=2\nsleep $A\necho $A");
        let mut interpreter = Bash_original::new(data);
        let res = interpreter.run();
        // should panic if not an Ok()
        let string_result = res.unwrap();
        assert_eq!(string_result, "2\n");
    }
}
| 29.326829 | 100 | 0.530938 |
f89337b228cc4faa3569bb1b1ddcff195a464c56 | 5,665 | use crate::{
geometry::Point,
primitives::{
line::{self, Line},
triangle::{sort_two_yx, sort_yx, IterState, Triangle},
Primitive,
},
};
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
/// Classifies a point yielded by the scanline iterator.
pub enum PointType {
    /// Point lies on one of the triangle's edges.
    Border,
    /// Point lies strictly inside the triangle.
    Inside,
}
/// Iterator over all points inside the triangle.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
pub struct ScanlineIterator {
    /// Left-most edge of the triangle
    line_a: line::Points,
    /// Right-most edge of the triangle
    line_b: line::Points,
    /// Bottom edge of the triangle
    line_c: line::Points,
    /// Current point on the a/c edge chain (left side), if any.
    cur_ac: Option<Point>,
    /// Current point on edge b (right side), if any.
    cur_b: Option<Point>,
    /// Look-ahead point on the a/c edge chain.
    next_ac: Option<Point>,
    /// Look-ahead point on edge b.
    next_b: Option<Point>,
    /// Horizontal offset while filling between the left and right edges.
    x: i32,
    /// Largest y coordinate of the triangle (bottom-most vertex).
    max_y: i32,
    /// Smallest y coordinate of the triangle (top-most vertex).
    min_y: i32,
}
impl ScanlineIterator {
    /// Builds the iterator from a triangle: vertices are sorted by (y, x) so
    /// edge a+c forms one side chain and edge b the other.
    pub(in crate::primitives) fn new(triangle: &Triangle) -> Self {
        let (v1, v2, v3) = sort_yx(triangle.p1, triangle.p2, triangle.p3);
        let mut line_a = Line::new(v1, v2).points();
        let mut line_b = Line::new(v1, v3).points();
        let mut line_c = Line::new(v2, v3).points();
        // Edge a flows into edge c once exhausted (they share vertex v2).
        let next_ac = line_a.next().or_else(|| line_c.next());
        let next_b = line_b.next();
        Self {
            line_a,
            line_b,
            line_c,
            cur_ac: None,
            cur_b: None,
            next_ac,
            next_b,
            x: 0,
            min_y: v1.y,
            max_y: v3.y,
        }
    }
    /// Iterator that yields no points at all.
    pub(in crate::primitives) fn empty() -> Self {
        Self {
            line_a: line::Points::empty(),
            line_b: line::Points::empty(),
            line_c: line::Points::empty(),
            cur_ac: None,
            cur_b: None,
            next_ac: None,
            next_b: None,
            x: 0,
            max_y: 0,
            min_y: 0,
        }
    }
    /// Advances the a/c chain by one point, resetting the fill cursor.
    fn update_ac(&mut self) -> IterState {
        if let Some(ac) = self.next_ac {
            self.cur_ac = Some(ac);
            self.next_ac = self.line_a.next().or_else(|| self.line_c.next());
            self.x = 0;
            IterState::Border(ac)
        } else {
            IterState::None
        }
    }
    /// Advances edge b by one point, resetting the fill cursor.
    fn update_b(&mut self) -> IterState {
        if let Some(b) = self.next_b {
            self.cur_b = Some(b);
            self.next_b = self.line_b.next();
            self.x = 0;
            IterState::Border(b)
        } else {
            IterState::None
        }
    }
    /// Decides what to emit next: a border point while the two side edges are
    /// being brought to the same scanline, or a left/right pair to fill.
    pub(in crate::primitives::triangle) fn points(&mut self) -> IterState {
        match (self.cur_ac, self.cur_b) {
            // Point of ac line or b line is missing
            (None, _) => self.update_ac(),
            (_, None) => self.update_b(),
            // Both points are present
            (Some(ac), Some(b)) => {
                match (self.next_ac, self.next_b) {
                    (Some(n_ac), Some(n_b)) => {
                        // If y component differs, take new points from edge until both side have
                        // the same y
                        if n_ac.y < n_b.y {
                            self.update_ac()
                        } else if n_ac.y > n_b.y {
                            self.update_b()
                        } else {
                            let (l, r) = sort_two_yx(n_ac, n_b);
                            IterState::LeftRight(l, r)
                        }
                    }
                    (None, Some(_)) => self.update_b(),
                    (Some(_), None) => self.update_ac(),
                    (None, None) => {
                        let (l, r) = sort_two_yx(ac, b);
                        IterState::LeftRight(l, r)
                    }
                }
            }
        }
    }
}
impl Iterator for ScanlineIterator {
    type Item = (PointType, Point);
    /// Yields border points first for each scanline, then fills the interior
    /// left-to-right between the two edges.
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            match self.points() {
                IterState::Border(point) => {
                    // Draw edges of the triangle
                    self.x += 1;
                    return Some((PointType::Border, point));
                }
                IterState::LeftRight(l, r) => {
                    // Fill the space between the left and right points
                    if l.x + self.x < r.x {
                        let point = Point::new(l.x + self.x, l.y);
                        self.x += 1;
                        return Some((PointType::Inside, point));
                    } else if l.x + self.x >= r.x {
                        // We reached the right edge, move on to next row
                        self.cur_ac = None;
                        self.cur_b = None;
                    }
                }
                IterState::None => return None,
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        drawable::Pixel, pixelcolor::BinaryColor, style::PrimitiveStyle, transform::Transform,
    };
    // The point iterator must visit exactly the pixels a filled styled
    // triangle would draw, in the same order.
    #[test]
    fn points_iter() {
        let triangle = Triangle::new(Point::new(5, 10), Point::new(15, 20), Point::new(10, 15));
        let styled_points = triangle
            .clone()
            .into_styled(PrimitiveStyle::with_fill(BinaryColor::On))
            .into_iter()
            .map(|Pixel(p, _)| p);
        assert!(triangle.points().eq(styled_points));
    }
    // Translating a partially off-screen triangle on-screen must not change
    // which points are produced (modulo the translation).
    #[test]
    fn off_screen_still_draws_points() {
        let off_screen = Triangle::new(Point::new(10, 10), Point::new(20, 20), Point::new(30, -30));
        let on_screen = off_screen.translate(Point::new(0, 35));
        assert!(off_screen
            .points()
            .eq(on_screen.points().map(|p| p - Point::new(0, 35))));
    }
}
| 29.973545 | 100 | 0.462136 |
2831842baf0a2408c1d326761cb8241b1e7e80ce | 270 | // Copyright 2020-2022 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
mod message;
mod milestone;
pub(crate) use message::{MessageResponderWorker, MessageResponderWorkerEvent};
pub(crate) use milestone::{MilestoneResponderWorker, MilestoneResponderWorkerEvent};
| 30 | 84 | 0.814815 |
9c98cac209c1a21309bb43742c1403f4b6c9a317 | 2,159 | // Copyright 2018-2022 Cargill Incorporated
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::UserProfileStoreOperations;
use diesel::{prelude::*, result::Error::NotFound};
use crate::biome::profile::store::{
diesel::{models::ProfileModel, schema::user_profile},
Profile, UserProfileStoreError,
};
use crate::error::{InternalError, InvalidArgumentError};
/// Operation for fetching a single user profile by its user id.
pub trait UserProfileStoreGetProfile {
    /// Returns the profile for `user_id`, or an `InvalidArgument` error when
    /// no profile exists, or an `Internal` error on database failure.
    fn get_profile(&self, user_id: &str) -> Result<Profile, UserProfileStoreError>;
}
impl<'a, C> UserProfileStoreGetProfile for UserProfileStoreOperations<'a, C>
where
    C: diesel::Connection,
    i64: diesel::deserialize::FromSql<diesel::sql_types::BigInt, C::Backend>,
    String: diesel::deserialize::FromSql<diesel::sql_types::Text, C::Backend>,
{
    fn get_profile(&self, user_id: &str) -> Result<Profile, UserProfileStoreError> {
        // Fetch at most one row for this user. diesel's `NotFound` is mapped
        // to `Ok(None)` so a missing row becomes an invalid-argument error
        // below instead of an internal database failure.
        let profile = user_profile::table
            .filter(user_profile::user_id.eq(user_id))
            .first::<ProfileModel>(self.conn)
            .map(Some)
            .or_else(|err| if err == NotFound { Ok(None) } else { Err(err) })
            .map_err(|err| {
                // NOTE(review): the `{}` here is filled with the diesel error,
                // not the user id — message wording is slightly misleading.
                UserProfileStoreError::Internal(InternalError::with_message(format!(
                    "Failed check for existing user_id {}",
                    err
                )))
            })?
            .ok_or_else(|| {
                UserProfileStoreError::InvalidArgument(InvalidArgumentError::new(
                    "user_id".to_string(),
                    "A profile for the given user_id does not exist".to_string(),
                ))
            })?;
        Ok(Profile::from(profile))
    }
}
| 38.553571 | 84 | 0.649375 |
39ed765283bf26a41e0eacea8e05c97e28ac03e5 | 16,656 | use data_encoding::HEXUPPER;
use multihash::Multihash;
use multiaddr::*;
use quickcheck::{Arbitrary, Gen, QuickCheck};
use std::{
borrow::Cow,
convert::{TryFrom, TryInto},
iter::{FromIterator, self},
net::{Ipv4Addr, Ipv6Addr},
str::FromStr
};
// Property tests
/// Property: encoding a `Multiaddr` to bytes and decoding it back must be the
/// identity.
#[test]
fn to_from_bytes_identity() {
    fn prop(a: Ma) -> bool {
        let encoded = a.0.to_vec();
        Multiaddr::try_from(encoded).ok().map(Ma) == Some(a)
    }
    QuickCheck::new().quickcheck(prop as fn(Ma) -> bool)
}
/// Property: rendering a `Multiaddr` as a string and parsing it back must be
/// the identity.
#[test]
fn to_from_str_identity() {
    fn prop(a: Ma) -> bool {
        let rendered = a.0.to_string();
        Multiaddr::from_str(&rendered).ok().map(Ma) == Some(a)
    }
    QuickCheck::new().quickcheck(prop as fn(Ma) -> bool)
}
/// Property: appending every protocol of `b` onto `a` with `with` yields the
/// protocol-wise concatenation of the two addresses.
#[test]
fn byteswriter() {
    fn prop(a: Ma, b: Ma) -> bool {
        let combined = b.0.iter().fold(a.0.clone(), |addr, p| addr.with(p));
        combined
            .iter()
            .zip(a.0.iter().chain(b.0.iter()))
            .all(|(x, y)| x == y)
    }
    QuickCheck::new().quickcheck(prop as fn(Ma, Ma) -> bool)
}
/// Property: pushing a protocol and popping it again restores the original
/// address, and the popped protocol is the one that was pushed.
#[test]
fn push_pop_identity() {
    fn prop(a: Ma, p: Proto) -> bool {
        let mut b = a.clone();
        let q = p.clone();
        b.0.push(q.0);
        // Pushing must actually change the address.
        assert_ne!(a.0, b.0);
        Some(p.0) == b.0.pop() && a.0 == b.0
    }
    QuickCheck::new().quickcheck(prop as fn(Ma, Proto) -> bool)
}
/// Property: every suffix of an address (obtained by skipping a prefix of its
/// protocols) is recognised by `ends_with`.
#[test]
fn ends_with() {
    fn prop(Ma(m): Ma) {
        let n = m.iter().count();
        for i in 0 .. n {
            let suffix = m.iter().skip(i).collect::<Multiaddr>();
            assert!(m.ends_with(&suffix));
        }
    }
    QuickCheck::new().quickcheck(prop as fn(_))
}
// Arbitrary impls
/// Newtype around `Multiaddr` so quickcheck can generate arbitrary addresses.
#[derive(PartialEq, Eq, Clone, Hash, Debug)]
struct Ma(Multiaddr);
impl Arbitrary for Ma {
    fn arbitrary<G: Gen>(g: &mut G) -> Self {
        // Up to 127 random protocol segments.
        let iter = (0 .. g.next_u32() % 128).map(|_| Proto::arbitrary(g).0);
        Ma(Multiaddr::from_iter(iter))
    }
}
/// Newtype around `Protocol` so quickcheck can generate arbitrary segments.
#[derive(PartialEq, Eq, Clone, Debug)]
struct Proto(Protocol<'static>);
impl Arbitrary for Proto {
    fn arbitrary<G: Gen>(g: &mut G) -> Self {
        use Protocol::*;
        // One arm per supported protocol variant.
        match u8::arbitrary(g) % 26 { // TODO: Add Protocol::Quic
            0 => Proto(Dccp(Arbitrary::arbitrary(g))),
            1 => Proto(Dns(Cow::Owned(SubString::arbitrary(g).0))),
            2 => Proto(Dns4(Cow::Owned(SubString::arbitrary(g).0))),
            3 => Proto(Dns6(Cow::Owned(SubString::arbitrary(g).0))),
            4 => Proto(Http),
            5 => Proto(Https),
            6 => Proto(Ip4(Ipv4Addr::arbitrary(g))),
            7 => Proto(Ip6(Ipv6Addr::arbitrary(g))),
            8 => Proto(P2pWebRtcDirect),
            9 => Proto(P2pWebRtcStar),
            10 => Proto(P2pWebSocketStar),
            11 => Proto(Memory(Arbitrary::arbitrary(g))),
            // TODO: impl Arbitrary for Multihash:
            12 => Proto(P2p(multihash("QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC"))),
            13 => Proto(P2pCircuit),
            14 => Proto(Quic),
            15 => Proto(Sctp(Arbitrary::arbitrary(g))),
            16 => Proto(Tcp(Arbitrary::arbitrary(g))),
            17 => Proto(Udp(Arbitrary::arbitrary(g))),
            18 => Proto(Udt),
            19 => Proto(Unix(Cow::Owned(SubString::arbitrary(g).0))),
            20 => Proto(Utp),
            21 => Proto(Ws("/".into())),
            22 => Proto(Wss("/".into())),
            23 => {
                // Onion v2: 10-byte address; port must be non-zero.
                let a = iter::repeat_with(|| u8::arbitrary(g))
                    .take(10)
                    .collect::<Vec<_>>()
                    .try_into()
                    .unwrap();
                Proto(Onion(Cow::Owned(a), std::cmp::max(1, u16::arbitrary(g))))
            },
            24 => {
                // Onion v3: 35-byte address; port must be non-zero.
                let a: [u8;35] = iter::repeat_with(|| u8::arbitrary(g))
                    .take(35)
                    .collect::<Vec<_>>()
                    .try_into()
                    .unwrap();
                Proto(Onion3((a, std::cmp::max(1, u16::arbitrary(g))).into()))
            },
            25 => Proto(Tls),
            _ => panic!("outside range")
        }
    }
}
/// Arbitrary ASCII string that never contains '/', so it is safe to embed as
/// a single path segment of a multiaddr.
#[derive(PartialEq, Eq, Clone, Debug)]
struct SubString(String); // ASCII string without '/'
impl Arbitrary for SubString {
    fn arbitrary<G: Gen>(g: &mut G) -> Self {
        let filtered: String = String::arbitrary(g)
            .chars()
            .filter(|&c| c.is_ascii() && c != '/')
            .collect();
        SubString(filtered)
    }
}
// other unit tests
/// Asserts that `source` parses to an address whose binary encoding is the
/// uppercase-hex string `target`, that it decomposes into `protocols`, and
/// that both the string and byte round-trips are lossless.
fn ma_valid(source: &str, target: &str, protocols: Vec<Protocol<'_>>) {
    let parsed = source.parse::<Multiaddr>().unwrap();
    assert_eq!(HEXUPPER.encode(&parsed.to_vec()[..]), target);
    assert_eq!(parsed.iter().collect::<Vec<_>>(), protocols);
    assert_eq!(source.parse::<Multiaddr>().unwrap().to_string(), source);
    assert_eq!(Multiaddr::try_from(HEXUPPER.decode(target.as_bytes()).unwrap()).unwrap(), parsed);
}
/// Decodes a base58 multihash literal; panics on malformed input (test-only
/// helper, inputs are hard-coded).
fn multihash(s: &str) -> Multihash {
    let raw = bs58::decode(s).into_vec().unwrap();
    Multihash::from_bytes(&raw).unwrap()
}
/// Equality must compare the full protocol stack, not just the IP part.
#[test]
fn multiaddr_eq() {
    let m1 = "/ip4/127.0.0.1/udp/1234".parse::<Multiaddr>().unwrap();
    let m2 = "/ip4/127.0.0.1/tcp/1234".parse::<Multiaddr>().unwrap();
    let m3 = "/ip4/127.0.0.1/tcp/1234".parse::<Multiaddr>().unwrap();
    assert_ne!(m1, m2);
    assert_ne!(m2, m1);
    assert_eq!(m2, m3);
    assert_eq!(m1, m1);
}
/// Golden tests: each call checks string form, hex-encoded binary form and
/// protocol decomposition of a known-good multiaddr (see `ma_valid`).
#[test]
fn construct_success() {
    use Protocol::*;
    let local: Ipv4Addr = "127.0.0.1".parse().unwrap();
    let addr6: Ipv6Addr = "2001:8a0:7ac5:4201:3ac9:86ff:fe31:7095".parse().unwrap();
    ma_valid("/ip4/1.2.3.4", "0401020304", vec![Ip4("1.2.3.4".parse().unwrap())]);
    ma_valid("/ip4/0.0.0.0", "0400000000", vec![Ip4("0.0.0.0".parse().unwrap())]);
    ma_valid("/ip6/::1", "2900000000000000000000000000000001", vec![Ip6("::1".parse().unwrap())]);
    ma_valid("/ip6/2601:9:4f81:9700:803e:ca65:66e8:c21",
             "29260100094F819700803ECA6566E80C21",
             vec![Ip6("2601:9:4f81:9700:803e:ca65:66e8:c21".parse().unwrap())]);
    ma_valid("/udp/0", "91020000", vec![Udp(0)]);
    ma_valid("/tcp/0", "060000", vec![Tcp(0)]);
    ma_valid("/sctp/0", "84010000", vec![Sctp(0)]);
    ma_valid("/udp/1234", "910204D2", vec![Udp(1234)]);
    ma_valid("/tcp/1234", "0604D2", vec![Tcp(1234)]);
    ma_valid("/sctp/1234", "840104D2", vec![Sctp(1234)]);
    ma_valid("/udp/65535", "9102FFFF", vec![Udp(65535)]);
    ma_valid("/tcp/65535", "06FFFF", vec![Tcp(65535)]);
    ma_valid("/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",
             "A503221220D52EBB89D85B02A284948203A62FF28389C57C9F42BEEC4EC20DB76A68911C0B",
             vec![P2p(multihash("QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC"))]);
    ma_valid("/udp/1234/sctp/1234", "910204D2840104D2", vec![Udp(1234), Sctp(1234)]);
    ma_valid("/udp/1234/udt", "910204D2AD02", vec![Udp(1234), Udt]);
    ma_valid("/udp/1234/utp", "910204D2AE02", vec![Udp(1234), Utp]);
    ma_valid("/tcp/1234/http", "0604D2E003", vec![Tcp(1234), Http]);
    ma_valid("/tcp/1234/tls/http", "0604D2C003E003", vec![Tcp(1234), Tls, Http]);
    ma_valid("/tcp/1234/https", "0604D2BB03", vec![Tcp(1234), Https]);
    ma_valid("/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC/tcp/1234",
             "A503221220D52EBB89D85B02A284948203A62FF28389C57C9F42BEEC4EC20DB76A68911C0B0604D2",
             vec![P2p(multihash("QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC")), Tcp(1234)]);
    ma_valid("/ip4/127.0.0.1/udp/1234", "047F000001910204D2", vec![Ip4(local.clone()), Udp(1234)]);
    ma_valid("/ip4/127.0.0.1/udp/0", "047F00000191020000", vec![Ip4(local.clone()), Udp(0)]);
    ma_valid("/ip4/127.0.0.1/tcp/1234", "047F0000010604D2", vec![Ip4(local.clone()), Tcp(1234)]);
    ma_valid("/ip4/127.0.0.1/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",
             "047F000001A503221220D52EBB89D85B02A284948203A62FF28389C57C9F42BEEC4EC20DB76A68911C0B",
             vec![Ip4(local.clone()), P2p(multihash("QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC"))]);
    ma_valid("/ip4/127.0.0.1/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC/tcp/1234",
             "047F000001A503221220D52EBB89D85B02A284948203A62FF28389C57C9F42BEEC4EC20DB76A68911C0B0604D2",
             vec![Ip4(local.clone()), P2p(multihash("QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC")), Tcp(1234)]);
    // /unix/a/b/c/d/e,
    // /unix/stdio,
    // /ip4/1.2.3.4/tcp/80/unix/a/b/c/d/e/f,
    // /ip4/127.0.0.1/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC/tcp/1234/unix/stdio
    ma_valid("/ip6/2001:8a0:7ac5:4201:3ac9:86ff:fe31:7095/tcp/8000/ws/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",
             "29200108A07AC542013AC986FFFE317095061F40DD03A503221220D52EBB89D85B02A284948203A62FF28389C57C9F42BEEC4EC20DB76A68911C0B",
             vec![Ip6(addr6.clone()), Tcp(8000), Ws("/".into()), P2p(multihash("QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC"))
    ]);
    ma_valid("/p2p-webrtc-star/ip4/127.0.0.1/tcp/9090/ws/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",
             "9302047F000001062382DD03A503221220D52EBB89D85B02A284948203A62FF28389C57C9F42BEEC4EC20DB76A68911C0B",
             vec![P2pWebRtcStar, Ip4(local.clone()), Tcp(9090), Ws("/".into()), P2p(multihash("QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC"))
    ]);
    ma_valid("/ip6/2001:8a0:7ac5:4201:3ac9:86ff:fe31:7095/tcp/8000/wss/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",
             "29200108A07AC542013AC986FFFE317095061F40DE03A503221220D52EBB89D85B02A284948203A62FF28389C57C9F42BEEC4EC20DB76A68911C0B",
             vec![Ip6(addr6.clone()), Tcp(8000), Wss("/".into()), P2p(multihash("QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC"))]);
    ma_valid("/ip4/127.0.0.1/tcp/9090/p2p-circuit/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",
             "047F000001062382A202A503221220D52EBB89D85B02A284948203A62FF28389C57C9F42BEEC4EC20DB76A68911C0B",
             vec![Ip4(local.clone()), Tcp(9090), P2pCircuit, P2p(multihash("QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC"))]);
    ma_valid(
        "/onion/aaimaq4ygg2iegci:80",
        "BC030010C0439831B48218480050",
        vec![Onion(Cow::Owned([0, 16, 192, 67, 152, 49, 180, 130, 24, 72]), 80)],
    );
    ma_valid(
        "/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd:1234",
        "BD03ADADEC040BE047F9658668B11A504F3155001F231A37F54C4476C07FB4CC139ED7E30304D2",
        vec![Onion3(([173, 173, 236, 4, 11, 224, 71, 249, 101, 134, 104, 177, 26, 80, 79, 49, 85, 0, 31, 35, 26, 55, 245, 76, 68, 118, 192, 127, 180, 204, 19, 158, 215, 227, 3], 1234).into())],
    );
    ma_valid(
        "/dnsaddr/sjc-1.bootstrap.libp2p.io",
        "3819736A632D312E626F6F7473747261702E6C69627032702E696F",
        vec![Dnsaddr(Cow::Borrowed("sjc-1.bootstrap.libp2p.io"))]
    );
    ma_valid(
        "/dnsaddr/sjc-1.bootstrap.libp2p.io/tcp/1234/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
        "3819736A632D312E626F6F7473747261702E6C69627032702E696F0604D2A50322122006B3608AA000274049EB28AD8E793A26FF6FAB281A7D3BD77CD18EB745DFAABB",
        vec![Dnsaddr(Cow::Borrowed("sjc-1.bootstrap.libp2p.io")), Tcp(1234), P2p(multihash("QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"))]
    );
    ma_valid("/ip4/127.0.0.1/tcp/127/ws", "047F00000106007FDD03", vec![Ip4(local.clone()),Tcp(127),Ws("/".into())] );
    ma_valid("/ip4/127.0.0.1/tcp/127/tls", "047F00000106007FC003", vec![Ip4(local.clone()),Tcp(127),Tls] );
    ma_valid("/ip4/127.0.0.1/tcp/127/tls/ws", "047F00000106007FC003DD03", vec![Ip4(local.clone()),Tcp(127),Tls,Ws("/".into())] );
}
/// Every entry here is malformed (missing argument, out-of-range port, bad
/// onion address length/port, trailing garbage, ...) and must fail to parse.
#[test]
fn construct_fail() {
    let addresses = [
        "/ip4",
        "/ip4/::1",
        "/ip4/fdpsofodsajfdoisa",
        "/ip6",
        "/udp",
        "/tcp",
        "/sctp",
        "/udp/65536",
        "/tcp/65536",
        "/onion/9imaq4ygg2iegci7:80",
        "/onion/aaimaq4ygg2iegci7:80",
        "/onion/timaq4ygg2iegci7:0",
        "/onion/timaq4ygg2iegci7:-1",
        "/onion/timaq4ygg2iegci7",
        "/onion/timaq4ygg2iegci@:666",
        "/onion3/9ww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd:80",
        "/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd7:80",
        "/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd:0",
        "/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd:-1",
        "/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd",
        "/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyy@:666",
        "/udp/1234/sctp",
        "/udp/1234/udt/1234",
        "/udp/1234/utp/1234",
        "/ip4/127.0.0.1/udp/jfodsajfidosajfoidsa",
        "/ip4/127.0.0.1/udp",
        "/ip4/127.0.0.1/tcp/jfodsajfidosajfoidsa",
        "/ip4/127.0.0.1/tcp",
        "/ip4/127.0.0.1/p2p",
        "/ip4/127.0.0.1/p2p/tcp",
        "/p2p-circuit/50"
    ];
    for address in &addresses {
        assert!(address.parse::<Multiaddr>().is_err(), "{}", address.to_string());
    }
}
/// Conversions from `Ipv4Addr`/`Ipv6Addr`/strings into `Multiaddr`, including
/// chained `with(Protocol::Tcp(..))`, must agree with parsed addresses.
#[test]
fn to_multiaddr() {
    assert_eq!(Multiaddr::from(Ipv4Addr::new(127, 0, 0, 1)), "/ip4/127.0.0.1".parse().unwrap());
    assert_eq!(Multiaddr::from(Ipv6Addr::new(0x2601, 0x9, 0x4f81, 0x9700, 0x803e, 0xca65, 0x66e8, 0xc21)),
               "/ip6/2601:9:4f81:9700:803e:ca65:66e8:c21".parse().unwrap());
    assert_eq!(Multiaddr::try_from("/ip4/127.0.0.1/tcp/1234".to_string()).unwrap(),
               "/ip4/127.0.0.1/tcp/1234".parse::<Multiaddr>().unwrap());
    assert_eq!(Multiaddr::try_from("/ip6/2601:9:4f81:9700:803e:ca65:66e8:c21").unwrap(),
               "/ip6/2601:9:4f81:9700:803e:ca65:66e8:c21".parse::<Multiaddr>().unwrap());
    assert_eq!(Multiaddr::from(Ipv4Addr::new(127, 0, 0, 1)).with(Protocol::Tcp(1234)),
               "/ip4/127.0.0.1/tcp/1234".parse::<Multiaddr>().unwrap());
    assert_eq!(Multiaddr::from(Ipv6Addr::new(0x2601, 0x9, 0x4f81, 0x9700, 0x803e, 0xca65, 0x66e8, 0xc21))
                   .with(Protocol::Tcp(1234)),
               "/ip6/2601:9:4f81:9700:803e:ca65:66e8:c21/tcp/1234".parse::<Multiaddr>().unwrap());
}
/// Arbitrary bytes that are not a valid multiaddr encoding must be rejected.
#[test]
fn from_bytes_fail() {
    assert!(Multiaddr::try_from(vec![1, 2, 3, 4]).is_err());
}
/// serde_json must serialize a multiaddr as its human-readable string form
/// and round-trip it losslessly.
#[test]
fn ser_and_deser_json() {
    let addr : Multiaddr = "/ip4/0.0.0.0/tcp/0/tls".parse::<Multiaddr>().unwrap();
    let serialized = serde_json::to_string(&addr).unwrap();
    assert_eq!(serialized, "\"/ip4/0.0.0.0/tcp/0/tls\"");
    let deserialized: Multiaddr = serde_json::from_str(&serialized).unwrap();
    assert_eq!(addr, deserialized);
}
/// bincode (a non-human-readable format) must use the compact binary
/// encoding (length prefix + raw multiaddr bytes) and round-trip losslessly.
#[test]
fn ser_and_deser_bincode() {
    let addr : Multiaddr = "/ip4/0.0.0.0/tcp/0/tls".parse::<Multiaddr>().unwrap();
    let serialized = bincode::serialize(&addr).unwrap();
    // compact addressing
    assert_eq!(serialized, vec![10, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 6, 0, 0, 192, 3]);
    let deserialized: Multiaddr = bincode::deserialize(&serialized).unwrap();
    assert_eq!(addr, deserialized);
}
/// `push` must append protocols in order, observable via `iter`.
#[test]
fn append() {
    let mut a: Multiaddr = Protocol::Ip4(Ipv4Addr::new(1, 2, 3, 4)).into();
    a.push(Protocol::Tcp(80));
    a.push(Protocol::Http);
    let mut i = a.iter();
    assert_eq!(Some(Protocol::Ip4(Ipv4Addr::new(1, 2, 3, 4))), i.next());
    assert_eq!(Some(Protocol::Tcp(80)), i.next());
    assert_eq!(Some(Protocol::Http), i.next());
    assert_eq!(None, i.next())
}
/// Replaces the first protocol of `a` with `p` when it is an IPv4 or IPv6
/// segment; returns `None` when no replacement took place.
fn replace_ip_addr(a: &Multiaddr, p: Protocol<'_>) -> Option<Multiaddr> {
    a.replace(0, move |x| {
        if matches!(x, Protocol::Ip4(_) | Protocol::Ip6(_)) {
            Some(p)
        } else {
            None
        }
    })
}
/// Replacing an IPv4 segment with another IPv4 segment keeps the rest intact.
#[test]
fn replace_ip4_with_ip4() {
    let server = multiaddr!(Ip4(Ipv4Addr::LOCALHOST), Tcp(10000u16));
    let result = replace_ip_addr(&server, Protocol::Ip4([80, 81, 82, 83].into())).unwrap();
    assert_eq!(result, multiaddr!(Ip4([80, 81, 82, 83]), Tcp(10000u16)))
}
/// An IPv6 segment may be replaced by an IPv4 one (cross-family swap).
#[test]
fn replace_ip6_with_ip4() {
    let server = multiaddr!(Ip6(Ipv6Addr::LOCALHOST), Tcp(10000u16));
    let result = replace_ip_addr(&server, Protocol::Ip4([80, 81, 82, 83].into())).unwrap();
    assert_eq!(result, multiaddr!(Ip4([80, 81, 82, 83]), Tcp(10000u16)))
}
/// An IPv4 segment may be replaced by an IPv6 one (cross-family swap).
#[test]
fn replace_ip4_with_ip6() {
    let server = multiaddr!(Ip4(Ipv4Addr::LOCALHOST), Tcp(10000u16));
    let result = replace_ip_addr(&server, "2001:db8::1".parse::<Ipv6Addr>().unwrap().into());
    assert_eq!(result.unwrap(), "/ip6/2001:db8::1/tcp/10000".parse::<Multiaddr>().unwrap())
}
/// Parsing an address with an unrecognised protocol name must fail with
/// `Error::UnknownProtocolString` carrying the offending protocol name.
///
/// Rewritten to avoid the `assert!(false, ..)` anti-pattern (flagged by
/// clippy's `assertions_on_constants`): a single exhaustive `match` with
/// `panic!` on the unexpected arms is clearer and reports which arm failed.
#[test]
fn unknown_protocol_string() {
    match "/unknown/1.2.3.4".parse::<Multiaddr>() {
        // Success would mean the parser silently accepted a bogus protocol.
        Ok(_) => panic!("parsing an unknown protocol should fail"),
        Err(crate::Error::UnknownProtocolString(protocol)) => {
            assert_eq!(protocol, "unknown")
        }
        Err(_) => panic!("expected the UnknownProtocolString error variant"),
    }
}
| 41.744361 | 193 | 0.615154 |
8fdba5c821bdf955bba809dca34a6d5cc926d50c | 2,004 | use super::*;
use ic_types::crypto::{AlgorithmId, CryptoError};
use std::convert::{From, TryFrom};
// From vector of bytes: wraps the raw key material without validation.
impl From<Vec<u8>> for SecretKeyBytes {
    fn from(key: Vec<u8>) -> Self {
        SecretKeyBytes(key)
    }
}
// From vector of bytes: wraps the raw key material without validation.
impl From<Vec<u8>> for PublicKeyBytes {
    fn from(key: Vec<u8>) -> Self {
        PublicKeyBytes(key)
    }
}
// From vector of bytes, validating the length.
impl TryFrom<Vec<u8>> for SignatureBytes {
    type Error = CryptoError;
    /// A secp256k1 signature is exactly `SIZE` bytes; any other length is
    /// reported as a `MalformedSignature` carrying the offending bytes.
    fn try_from(sig: Vec<u8>) -> Result<Self, CryptoError> {
        match <[u8; Self::SIZE]>::try_from(sig.as_slice()) {
            Ok(bytes) => Ok(Self(bytes)),
            Err(_) => {
                let sig_len = sig.len();
                Err(CryptoError::MalformedSignature {
                    algorithm: AlgorithmId::EcdsaSecp256k1,
                    sig_bytes: sig,
                    internal_error: format!(
                        "SECP256K1 signature must have {} bytes, got {}.",
                        Self::SIZE,
                        sig_len
                    ),
                })
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::types::SignatureBytes;
    use std::convert::TryFrom;
    // Exact-length input converts successfully.
    #[test]
    fn should_convert_vector_to_signature_bytes() {
        let bytes = vec![0; SignatureBytes::SIZE];
        let _sig_bytes = SignatureBytes::try_from(bytes).expect("conversion failed");
    }
    // One byte too long must be rejected as a malformed signature.
    #[test]
    fn should_fail_conversion_to_signature_bytes_if_vector_too_long() {
        let bytes = vec![0; SignatureBytes::SIZE + 1];
        let result = SignatureBytes::try_from(bytes);
        assert!(result.is_err());
        assert!(result.unwrap_err().is_malformed_signature());
    }
    // One byte too short must be rejected as a malformed signature.
    #[test]
    fn should_fail_conversion_to_signature_bytes_if_vector_too_short() {
        let bytes = vec![0; SignatureBytes::SIZE - 1];
        let result = SignatureBytes::try_from(bytes);
        assert!(result.is_err());
        assert!(result.unwrap_err().is_malformed_signature());
    }
}
| 29.043478 | 85 | 0.585828 |
d639c75ceb7feee67e332ac8f6186f174a8f361e | 2,290 | #![deny(warnings)]
#![feature(vec_push_all, test)]
extern crate hyper;
extern crate test;
use std::fmt;
use std::io::{self, Read, Write, Cursor};
use std::net::SocketAddr;
use hyper::net;
static README: &'static [u8] = include_bytes!("../README.md");
/// In-memory stand-in for a network stream: reads serve a canned HTTP
/// response, writes are discarded.
struct MockStream {
    // Pre-built response bytes served to every read.
    read: Cursor<Vec<u8>>
}
impl MockStream {
    /// Builds a stream whose readable side is a fixed "200 OK" response
    /// header followed by the README as the body.
    fn new() -> MockStream {
        let head = b"HTTP/1.1 200 OK\r\nServer: Mock\r\n\r\n";
        let mut res = head.to_vec();
        res.push_all(README);
        MockStream {
            read: Cursor::new(res)
        }
    }
}
impl Clone for MockStream {
    // Manual Clone: resets the cursor position to 0 for the copy
    // (derived Clone would preserve the read offset).
    fn clone(&self) -> MockStream {
        MockStream {
            read: Cursor::new(self.read.get_ref().clone())
        }
    }
}
impl Read for MockStream {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.read.read(buf)
    }
}
impl Write for MockStream {
    fn write(&mut self, msg: &[u8]) -> io::Result<usize> {
        // we're mocking, what do we care.
        Ok(msg.len())
    }
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
/// Minimal custom header (`x-foo: Bar`) used to exercise header formatting
/// in the benchmark; parsing is intentionally unsupported.
#[derive(Clone, Debug)]
struct Foo;
impl hyper::header::Header for Foo {
    fn header_name() -> &'static str {
        "x-foo"
    }
    // This mock is write-only: parsing always fails.
    fn parse_header(_: &[Vec<u8>]) -> hyper::Result<Foo> {
        Err(hyper::Error::Header)
    }
}
impl hyper::header::HeaderFormat for Foo {
    fn fmt_header(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("Bar")
    }
}
impl net::NetworkStream for MockStream {
    // Fixed fake peer address; no socket is ever opened.
    fn peer_addr(&mut self) -> io::Result<SocketAddr> {
        Ok("127.0.0.1:1337".parse().unwrap())
    }
}
struct MockConnector;
impl net::NetworkConnector for MockConnector {
type Stream = MockStream;
fn connect(&self, _: &str, _: u16, _: &str) -> hyper::Result<MockStream> {
Ok(MockStream::new())
}
}
#[bench]
// Benchmark one full request/response round trip (send request, read the
// entire canned response body) against the in-memory MockStream.
fn bench_mock_hyper(b: &mut test::Bencher) {
    let url = "http://127.0.0.1:1337/";
    b.iter(|| {
        let mut req = hyper::client::Request::with_connector(
            hyper::Get, hyper::Url::parse(url).unwrap(), &MockConnector
        ).unwrap();
        req.headers_mut().set(Foo);
        let mut s = String::new();
        req
            .start().unwrap()
            .send().unwrap()
            .read_to_string(&mut s).unwrap()
    });
}
| 22.23301 | 78 | 0.561135 |
3a7361765afa6a19546ad18a2d975dc900aff80d | 5,506 | // Copyright 2021 The BMW Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use failure::{Context, Fail};
use std::fmt;
use std::fmt::Display;
/// Wallet error wrapper: an `ErrorKind` carried inside a failure `Context`,
/// which provides the cause chain and backtrace support.
#[derive(Debug)]
pub struct Error {
    inner: Context<ErrorKind>,
}
#[derive(Debug, Fail)]
/// The different classes of error the wallet can produce; the `#[fail]`
/// attribute on each variant supplies its `Display` message.
pub enum ErrorKind {
	/// Seed Error
	#[fail(display = "Seed Error occurred: {}", _0)]
	SeedError(String),
	/// SyncingError
	#[fail(display = "Node is still syncing. Please wait: {}", _0)]
	SyncingError(String),
	/// Internal Error
	#[fail(display = "Internal Error occurred: {}", _0)]
	InternalError(String),
	/// Illegal Argument
	#[fail(display = "Illegal Argument Error: {}", _0)]
	IllegalArgument(String),
	/// Config Error
	#[fail(display = "Config Error occurred: {}", _0)]
	Config(String),
	/// Encryption Error (wrong password / decryption failure)
	#[fail(display = "Password mismatch")]
	Encryption,
	/// Generic Error
	#[fail(display = "Generic Error: {}", _0)]
	GenericError(String),
	/// TryInto Error (failed integer conversion)
	#[fail(display = "TryInto Error: {}", _0)]
	TryInto(String),
	/// Insufficient Funds
	#[fail(display = "Insufficient Funds: {}", _0)]
	InsufficientFunds(String),
	/// Keychain Error
	#[fail(display = "Keychain Error: {}", _0)]
	Keychain(String),
	/// Mnemonic Error
	#[fail(display = "Mnemonic Error")]
	Mnemonic,
	/// Path not found error
	#[fail(display = "Path not found: {}", _0)]
	PathNotFoundError(String),
	/// IO Error
	#[fail(display = "IO Error")]
	IO,
	/// Wallet Seed Doesn't Exist Error
	#[fail(display = "Wallet Seed doesn't exist")]
	WalletSeedDoesntExist,
	/// Format Error
	#[fail(display = "Format Error")]
	Format,
	/// Store Error
	#[fail(display = "Store Error: {}", _0)]
	StoreError(String),
	/// LibWallet Error
	#[fail(display = "LibWallet Error: {}", _0)]
	LibWallet(String),
	/// Wallet doesn't exist at the given path (path, extra detail)
	#[fail(display = "Wallet doesn't exist at {}. {}", _0, _1)]
	WalletDoesntExist(String, String),
	/// Wallet Seeds Exists
	#[fail(display = "Wallet Seed Exists: {}", _0)]
	WalletSeedExists(String),
}
impl Display for Error {
	/// Render the error by delegating to the inner context, which in turn
	/// prints the `ErrorKind`'s `#[fail(display = …)]` message.
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		write!(f, "{}", self.inner)
	}
}
impl From<ErrorKind> for Error {
fn from(kind: ErrorKind) -> Error {
Error {
inner: Context::new(kind),
}
}
}
impl From<Context<ErrorKind>> for Error {
fn from(inner: Context<ErrorKind>) -> Error {
Error { inner: inner }
}
}
impl From<bmw_wallet_util::grin_keychain::Error> for Error {
fn from(error: bmw_wallet_util::grin_keychain::Error) -> Error {
Error {
inner: Context::new(ErrorKind::Keychain(format!("{}", error))),
}
}
}
impl From<std::io::Error> for Error {
fn from(error: std::io::Error) -> Error {
Error {
inner: Context::new(ErrorKind::PathNotFoundError(format!("{}", error))),
}
}
}
impl From<Error> for bmw_wallet_libwallet::error::Error {
fn from(error: Error) -> bmw_wallet_libwallet::error::Error {
bmw_wallet_libwallet::error::Error {
inner: Context::new(bmw_wallet_libwallet::error::ErrorKind::ImplsError(format!(
"{}",
error
))),
}
}
}
impl From<bmw_wallet_util::grin_store::Error> for Error {
fn from(error: bmw_wallet_util::grin_store::Error) -> Error {
Error {
inner: Context::new(ErrorKind::StoreError(format!("{}", error))),
}
}
}
impl From<bmw_wallet_libwallet::error::Error> for Error {
fn from(error: bmw_wallet_libwallet::error::Error) -> Error {
Error {
inner: Context::new(ErrorKind::LibWallet(format!("{}", error))),
}
}
}
impl From<bmw_wallet_config::error::Error> for Error {
fn from(error: bmw_wallet_config::error::Error) -> Error {
Error {
inner: Context::new(ErrorKind::Config(format!("{}", error))),
}
}
}
impl From<std::num::TryFromIntError> for Error {
fn from(error: std::num::TryFromIntError) -> Error {
Error {
inner: Context::new(ErrorKind::TryInto(format!("{}", error))),
}
}
}
impl From<bmw_wallet_libwallet::ErrorKind> for Error {
fn from(error: bmw_wallet_libwallet::ErrorKind) -> Error {
Error {
inner: Context::new(ErrorKind::InternalError(format!("Libwallet: {}", error))),
}
}
}
impl From<bmw_wallet_util::grin_core::libtx::Error> for Error {
fn from(error: bmw_wallet_util::grin_core::libtx::Error) -> Error {
Error {
inner: Context::new(ErrorKind::InternalError(format!("Libtx: {}", error))),
}
}
}
impl From<bmw_wallet_util::grin_util::secp::Error> for Error {
fn from(error: bmw_wallet_util::grin_util::secp::Error) -> Error {
Error {
inner: Context::new(ErrorKind::InternalError(format!("secp: {}", error))),
}
}
}
impl From<bmw_wallet_util::grin_core::ser::Error> for Error {
fn from(error: bmw_wallet_util::grin_core::ser::Error) -> Error {
Error {
inner: Context::new(ErrorKind::InternalError(format!("ser error: {}", error))),
}
}
}
impl From<bmw_wallet_util::grin_core::address::Error> for Error {
fn from(error: bmw_wallet_util::grin_core::address::Error) -> Error {
Error {
inner: Context::new(ErrorKind::InternalError(format!(
"address error: {}",
error
))),
}
}
}
| 26.599034 | 82 | 0.671449 |
fba0f59ceb3d9ee6f526de144570e8f81a424ccd | 573 | //! The structs used for the actual parsing.
//!
//! Users of the crate shouldn't need to use these directly; use the functions that create them instead.
pub(crate) mod basic;
pub(crate) mod control;
pub(crate) mod delimited;
pub(crate) mod repeating;
pub use self::control::{Alt, Empty, Map, FlatMap, Eat, Opt, Satisfying, FlatMapErr};
pub use self::delimited::{Preceded, Terminated, Delimited};
pub use self::repeating::{Many0, Many1, List0, TakeWhile0, TakeWhile1, TakeUntil, Whitespace, Take, TakeNM};
pub use self::basic::{Tag, Symbol, Fst, Fun, Dbg, Constant}; | 44.076923 | 108 | 0.736475 |
2f82c49fe592d90bafcafdc2dcfef8dd01042e27 | 7,051 | // Copyright (c) Facebook Inc.
// SPDX-License-Identifier: Apache-2.0
use crate::transport::NetworkProtocol;
use fastpay_core::{
base_types::*,
client::ClientState,
messages::{Address, CertifiedTransferOrder},
};
use serde::{Deserialize, Serialize};
use std::{
collections::BTreeMap,
fs::{self, File, OpenOptions},
io::{BufRead, BufReader, BufWriter, Write},
};
/// Public network description of a single authority: how to reach it and
/// which key identifies it.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct AuthorityConfig {
    /// Transport protocol used to contact this authority.
    pub network_protocol: NetworkProtocol,
    #[serde(
        serialize_with = "address_as_base64",
        deserialize_with = "address_from_base64"
    )]
    /// Public key of the authority (base64-encoded in JSON).
    pub address: FastPayAddress,
    /// Hostname or IP address.
    pub host: String,
    /// First port; shard `i` listens on `base_port + i`.
    pub base_port: u32,
    /// Number of shards run by this authority.
    pub num_shards: u32,
}
impl AuthorityConfig {
    /// Dump this configuration as a single-line JSON document on stdout.
    pub fn print(&self) {
        println!("{}", serde_json::to_string(self).unwrap());
    }
}
/// Private server-side configuration of an authority: the public network
/// description plus the signing key pair.
#[derive(Serialize, Deserialize)]
pub struct AuthorityServerConfig {
    /// Public description (address, host, ports, shards).
    pub authority: AuthorityConfig,
    /// Secret key pair used to sign on behalf of the authority.
    pub key: KeyPair,
}
impl AuthorityServerConfig {
    /// Load a server configuration from a JSON file at `path`.
    pub fn read(path: &str) -> Result<Self, std::io::Error> {
        let data = fs::read(path)?;
        Ok(serde_json::from_slice(data.as_slice())?)
    }

    /// Write this configuration as pretty-printed JSON to `path`.
    pub fn write(&self, path: &str) -> Result<(), std::io::Error> {
        // Bug fix: truncate the file before writing so that rewriting a
        // shorter configuration does not leave stale trailing bytes from a
        // previous, longer file.
        let file = OpenOptions::new()
            .create(true)
            .truncate(true)
            .write(true)
            .open(path)?;
        let mut writer = BufWriter::new(file);
        let data = serde_json::to_string_pretty(self).unwrap();
        writer.write_all(data.as_ref())?;
        writer.write_all(b"\n")?;
        Ok(())
    }
}
/// The full committee: the public configuration of every known authority.
pub struct CommitteeConfig {
    pub authorities: Vec<AuthorityConfig>,
}
impl CommitteeConfig {
    /// Load the committee from `path`, one JSON-encoded `AuthorityConfig`
    /// per line. Malformed lines are silently skipped (original behavior).
    pub fn read(path: &str) -> Result<Self, std::io::Error> {
        let file = File::open(path)?;
        let reader = BufReader::new(file);
        let stream = serde_json::Deserializer::from_reader(reader).into_iter();
        Ok(Self {
            authorities: stream.filter_map(Result::ok).collect(),
        })
    }

    /// Write the committee to `path`, one JSON document per line.
    pub fn write(&self, path: &str) -> Result<(), std::io::Error> {
        // Bug fix: truncate before rewriting so a smaller committee does not
        // leave stale trailing lines from a previous, longer file.
        let file = OpenOptions::new()
            .create(true)
            .truncate(true)
            .write(true)
            .open(path)?;
        let mut writer = BufWriter::new(file);
        for config in &self.authorities {
            serde_json::to_writer(&mut writer, config)?;
            writer.write_all(b"\n")?;
        }
        Ok(())
    }

    /// Voting rights table: every authority gets exactly one vote.
    pub fn voting_rights(&self) -> BTreeMap<AuthorityName, usize> {
        let mut map = BTreeMap::new();
        for authority in &self.authorities {
            map.insert(authority.address, 1);
        }
        map
    }
}
/// Locally stored state of one user account, including its signing key and
/// the certificates it has sent and received.
#[derive(Serialize, Deserialize)]
pub struct UserAccount {
    #[serde(
        serialize_with = "address_as_base64",
        deserialize_with = "address_from_base64"
    )]
    /// Public address of the account (base64-encoded in JSON).
    pub address: FastPayAddress,
    /// Secret key pair controlling the account.
    pub key: KeyPair,
    /// Sequence number to use for the next transfer order.
    pub next_sequence_number: SequenceNumber,
    /// Last known balance.
    pub balance: Balance,
    /// Certificates for transfers sent from this account.
    pub sent_certificates: Vec<CertifiedTransferOrder>,
    /// Certificates for transfers received by this account (kept sorted by key).
    pub received_certificates: Vec<CertifiedTransferOrder>,
}
impl UserAccount {
pub fn new(balance: Balance) -> Self {
let (address, key) = get_key_pair();
Self {
address,
key,
next_sequence_number: SequenceNumber::new(),
balance,
sent_certificates: Vec::new(),
received_certificates: Vec::new(),
}
}
}
/// All locally known user accounts, indexed by address.
pub struct AccountsConfig {
    accounts: BTreeMap<FastPayAddress, UserAccount>,
}
impl AccountsConfig {
    /// Look up the account stored for `address`, if any.
    pub fn get(&self, address: &FastPayAddress) -> Option<&UserAccount> {
        self.accounts.get(address)
    }

    /// Insert (or replace) an account, keyed by its own address.
    pub fn insert(&mut self, account: UserAccount) {
        self.accounts.insert(account.address, account);
    }

    /// Number of locally known accounts.
    pub fn num_accounts(&self) -> usize {
        self.accounts.len()
    }

    /// Iterate mutably over all accounts.
    pub fn accounts_mut(&mut self) -> impl Iterator<Item = &mut UserAccount> {
        self.accounts.values_mut()
    }

    /// Copy the mutable fields of a client state back into the matching
    /// account. Panics if the account is unknown.
    pub fn update_from_state<A>(&mut self, state: &ClientState<A>) {
        let account = self
            .accounts
            .get_mut(&state.address())
            .expect("Updated account should already exist");
        account.next_sequence_number = state.next_sequence_number();
        account.balance = state.balance();
        account.sent_certificates = state.sent_certificates().clone();
        account.received_certificates = state.received_certificates().cloned().collect();
    }

    /// Credit a locally known recipient for an incoming certified transfer.
    /// The certificate is inserted in sorted order; duplicates (already
    /// present by key) are ignored so the balance is not credited twice.
    pub fn update_for_received_transfer(&mut self, certificate: CertifiedTransferOrder) {
        let transfer = &certificate.value.transfer;
        if let Address::FastPay(recipient) = &transfer.recipient {
            if let Some(config) = self.accounts.get_mut(recipient) {
                if let Err(position) = config
                    .received_certificates
                    .binary_search_by_key(&certificate.key(), CertifiedTransferOrder::key)
                {
                    config.balance = config.balance.try_add(transfer.amount.into()).unwrap();
                    config.received_certificates.insert(position, certificate)
                }
            }
        }
    }

    /// Load accounts from `path` (one JSON document per line), creating the
    /// file if it does not exist. Malformed lines are skipped.
    pub fn read_or_create(path: &str) -> Result<Self, std::io::Error> {
        let file = OpenOptions::new()
            .create(true)
            .write(true)
            .read(true)
            .open(path)?;
        let reader = BufReader::new(file);
        let stream = serde_json::Deserializer::from_reader(reader).into_iter();
        Ok(Self {
            accounts: stream
                .filter_map(Result::ok)
                .map(|account: UserAccount| (account.address, account))
                .collect(),
        })
    }

    /// Persist all accounts to `path`, one JSON document per line.
    pub fn write(&self, path: &str) -> Result<(), std::io::Error> {
        // Bug fix: truncate before rewriting so a shrinking account set does
        // not leave stale trailing JSON lines behind.
        let file = OpenOptions::new().truncate(true).write(true).open(path)?;
        let mut writer = BufWriter::new(file);
        for account in self.accounts.values() {
            serde_json::to_writer(&mut writer, account)?;
            writer.write_all(b"\n")?;
        }
        Ok(())
    }
}
/// Genesis state: the initial balance of each account.
pub struct InitialStateConfig {
    pub accounts: Vec<(FastPayAddress, Balance)>,
}
impl InitialStateConfig {
    /// Parse `path` where each line is `<base64 address>:<balance>`.
    pub fn read(path: &str) -> Result<Self, failure::Error> {
        let file = File::open(path)?;
        let reader = BufReader::new(file);
        let mut accounts = Vec::new();
        for line in reader.lines() {
            let line = line?;
            let elements = line.split(':').collect::<Vec<_>>();
            if elements.len() != 2 {
                failure::bail!("expecting two columns separated with ':'")
            }
            let address = decode_address(elements[0])?;
            let balance = elements[1].parse()?;
            accounts.push((address, balance));
        }
        Ok(Self { accounts })
    }

    /// Write the accounts back in the same `address:balance` line format.
    pub fn write(&self, path: &str) -> Result<(), std::io::Error> {
        // Bug fix: truncate so rewriting fewer accounts does not leave stale
        // trailing lines from a previous, longer file.
        let file = OpenOptions::new()
            .create(true)
            .truncate(true)
            .write(true)
            .open(path)?;
        let mut writer = BufWriter::new(file);
        for (address, balance) in &self.accounts {
            writeln!(writer, "{}:{}", encode_address(address), balance)?;
        }
        Ok(())
    }
}
| 31.199115 | 93 | 0.590555 |
d7c8466b22c6c9492c63ad69bb05074ded74a01c | 25,846 | // Copyright 2015-2016 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//! Build the non-Rust components.
// It seems like it would be a good idea to use `log!` for logging, but it
// isn't worth having the external dependencies (one for the `log` crate, and
// another for the concrete logging implementation). Instead we use `eprintln!`
// to log everything to stderr.
#![forbid(
anonymous_parameters,
box_pointers,
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unstable_features,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
unused_results,
variant_size_differences,
warnings
)]
// In the `pregenerate_asm_main()` case we don't want to access (Cargo)
// environment variables at all, so avoid `use std::env` here.
use std::{
fs::{self, DirEntry},
path::{Path, PathBuf},
process::Command,
time::SystemTime,
};
const X86: &str = "x86";
const X86_64: &str = "x86_64";
const AARCH64: &str = "aarch64";
const ARM: &str = "arm";
#[cfg_attr(rustfmt, rustfmt_skip)]
const RING_SRCS: &[(&[&str], &str)] = &[
(&[], "crypto/fipsmodule/aes/aes_nohw.c"),
(&[], "crypto/fipsmodule/bn/montgomery.c"),
(&[], "crypto/fipsmodule/bn/montgomery_inv.c"),
(&[], "crypto/limbs/limbs.c"),
(&[], "crypto/mem.c"),
(&[AARCH64, ARM, X86_64, X86], "crypto/crypto.c"),
(&[AARCH64, ARM, X86_64, X86], "crypto/curve25519/curve25519.c"),
(&[AARCH64, ARM, X86_64, X86], "crypto/fipsmodule/ec/ecp_nistz.c"),
(&[AARCH64, ARM, X86_64, X86], "crypto/fipsmodule/ec/ecp_nistz256.c"),
(&[AARCH64, ARM, X86_64, X86], "crypto/fipsmodule/ec/gfp_p256.c"),
(&[AARCH64, ARM, X86_64, X86], "crypto/fipsmodule/ec/gfp_p384.c"),
(&[X86_64, X86], "crypto/cpu-intel.c"),
(&[X86], "crypto/fipsmodule/aes/asm/aesni-x86.pl"),
(&[X86], "crypto/fipsmodule/aes/asm/vpaes-x86.pl"),
(&[X86], "crypto/fipsmodule/bn/asm/x86-mont.pl"),
(&[X86], "crypto/chacha/asm/chacha-x86.pl"),
(&[X86], "crypto/fipsmodule/ec/asm/ecp_nistz256-x86.pl"),
(&[X86], "crypto/fipsmodule/modes/asm/ghash-x86.pl"),
(&[X86], "crypto/poly1305/asm/poly1305-x86.pl"),
(&[X86_64], "crypto/fipsmodule/aes/asm/aesni-x86_64.pl"),
(&[X86_64], "crypto/fipsmodule/aes/asm/vpaes-x86_64.pl"),
(&[X86_64], "crypto/fipsmodule/bn/asm/x86_64-mont.pl"),
(&[X86_64], "crypto/fipsmodule/bn/asm/x86_64-mont5.pl"),
(&[X86_64], "crypto/chacha/asm/chacha-x86_64.pl"),
(&[X86_64], "crypto/fipsmodule/ec/asm/p256-x86_64-asm.pl"),
(&[X86_64], "crypto/fipsmodule/modes/asm/aesni-gcm-x86_64.pl"),
(&[X86_64], "crypto/fipsmodule/modes/asm/ghash-x86_64.pl"),
(&[X86_64], "crypto/poly1305/asm/poly1305-x86_64.pl"),
(&[X86_64], SHA512_X86_64),
(&[AARCH64, ARM], "crypto/fipsmodule/aes/asm/aesv8-armx.pl"),
(&[AARCH64, ARM], "crypto/fipsmodule/modes/asm/ghashv8-armx.pl"),
(&[ARM], "crypto/fipsmodule/aes/asm/bsaes-armv7.pl"),
(&[ARM], "crypto/fipsmodule/aes/asm/vpaes-armv7.pl"),
(&[ARM], "crypto/fipsmodule/bn/asm/armv4-mont.pl"),
(&[ARM], "crypto/chacha/asm/chacha-armv4.pl"),
(&[ARM], "crypto/curve25519/asm/x25519-asm-arm.S"),
(&[ARM], "crypto/fipsmodule/ec/asm/ecp_nistz256-armv4.pl"),
(&[ARM], "crypto/fipsmodule/modes/asm/ghash-armv4.pl"),
(&[ARM], "crypto/poly1305/asm/poly1305-armv4.pl"),
(&[ARM], "crypto/fipsmodule/sha/asm/sha256-armv4.pl"),
(&[ARM], "crypto/fipsmodule/sha/asm/sha512-armv4.pl"),
(&[AARCH64], "crypto/fipsmodule/aes/asm/vpaes-armv8.pl"),
(&[AARCH64], "crypto/fipsmodule/bn/asm/armv8-mont.pl"),
(&[AARCH64], "crypto/chacha/asm/chacha-armv8.pl"),
(&[AARCH64], "crypto/fipsmodule/ec/asm/ecp_nistz256-armv8.pl"),
(&[AARCH64], "crypto/fipsmodule/modes/asm/ghash-neon-armv8.pl"),
(&[AARCH64], "crypto/poly1305/asm/poly1305-armv8.pl"),
(&[AARCH64], SHA512_ARMV8),
];
const SHA256_X86_64: &str = "crypto/fipsmodule/sha/asm/sha256-x86_64.pl";
const SHA512_X86_64: &str = "crypto/fipsmodule/sha/asm/sha512-x86_64.pl";
const SHA256_ARMV8: &str = "crypto/fipsmodule/sha/asm/sha256-armv8.pl";
const SHA512_ARMV8: &str = "crypto/fipsmodule/sha/asm/sha512-armv8.pl";
const RING_TEST_SRCS: &[&str] = &[("crypto/constant_time_test.c")];
#[cfg_attr(rustfmt, rustfmt_skip)]
const RING_INCLUDES: &[&str] =
&[
"crypto/curve25519/curve25519_tables.h",
"crypto/curve25519/internal.h",
"crypto/fipsmodule/bn/internal.h",
"crypto/fipsmodule/ec/ecp_nistz256_table.inl",
"crypto/fipsmodule/ec/ecp_nistz384.inl",
"crypto/fipsmodule/ec/ecp_nistz.h",
"crypto/fipsmodule/ec/ecp_nistz384.h",
"crypto/fipsmodule/ec/ecp_nistz256.h",
"crypto/internal.h",
"crypto/limbs/limbs.h",
"crypto/limbs/limbs.inl",
"crypto/fipsmodule/modes/internal.h",
"include/GFp/aes.h",
"include/GFp/arm_arch.h",
"include/GFp/base.h",
"include/GFp/check.h",
"include/GFp/cpu.h",
"include/GFp/mem.h",
"include/GFp/type_check.h",
"third_party/fiat/curve25519_32.h",
"third_party/fiat/curve25519_64.h",
];
#[cfg_attr(rustfmt, rustfmt_skip)]
const RING_PERL_INCLUDES: &[&str] =
&["crypto/perlasm/arm-xlate.pl",
"crypto/perlasm/x86gas.pl",
"crypto/perlasm/x86nasm.pl",
"crypto/perlasm/x86asm.pl",
"crypto/perlasm/x86_64-xlate.pl"];
const RING_BUILD_FILE: &[&str] = &["build.rs"];
const PREGENERATED: &str = "pregenerated";
/// Extra flags passed only when compiling C sources (not C++/assembly).
/// MSVC gets none of these; they are GCC/Clang-specific.
fn c_flags(target: &Target) -> &'static [&'static str] {
    if target.env == MSVC {
        return &[];
    }
    static NON_MSVC_FLAGS: &[&str] = &[
        "-std=c1x", // GCC 4.6 requires "c1x" instead of "c11"
        "-Wbad-function-cast",
        "-Wnested-externs",
        "-Wstrict-prototypes",
    ];
    NON_MSVC_FLAGS
}
/// Warning/codegen flags shared by every compilation unit (C and assembly
/// alike), selected per toolchain family.
fn cpp_flags(target: &Target) -> &'static [&'static str] {
    if target.env != MSVC {
        static NON_MSVC_FLAGS: &[&str] = &[
            "-pedantic",
            "-pedantic-errors",
            "-Wall",
            "-Wextra",
            "-Wcast-align",
            "-Wcast-qual",
            "-Wconversion",
            "-Wenum-compare",
            "-Wfloat-equal",
            "-Wformat=2",
            "-Winline",
            "-Winvalid-pch",
            "-Wmissing-field-initializers",
            "-Wmissing-include-dirs",
            "-Wredundant-decls",
            "-Wshadow",
            "-Wsign-compare",
            "-Wsign-conversion",
            "-Wundef",
            "-Wuninitialized",
            "-Wwrite-strings",
            "-fno-strict-aliasing",
            "-fvisibility=hidden",
        ];
        NON_MSVC_FLAGS
    } else {
        static MSVC_FLAGS: &[&str] = &[
            "/GS", // Buffer security checks.
            "/Gy", // Enable function-level linking.
            "/EHsc", // C++ exceptions only, only in C++.
            "/GR-", // Disable RTTI.
            "/Zc:wchar_t",
            "/Zc:forScope",
            "/Zc:inline",
            "/Zc:rvalueCast",
            // Warnings.
            "/sdl",
            "/Wall",
            "/wd4127", // C4127: conditional expression is constant
            "/wd4464", // C4464: relative include path contains '..'
            "/wd4514", // C4514: <name>: unreferenced inline function has be
            "/wd4710", // C4710: function not inlined
            "/wd4711", // C4711: function 'function' selected for inline expansion
            "/wd4820", // C4820: <struct>: <n> bytes padding added after <name>
            "/wd5045", /* C5045: Compiler will insert Spectre mitigation for memory load if
                        * /Qspectre switch specified */
        ];
        MSVC_FLAGS
    }
}
const LD_FLAGS: &[&str] = &[];
// None means "any OS" or "any target". The first match in sequence order is
// taken.
const ASM_TARGETS: &[(&str, Option<&str>, Option<&str>)] = &[
("x86_64", Some("ios"), Some("macosx")),
("x86_64", Some("macos"), Some("macosx")),
("x86_64", Some(WINDOWS), Some("nasm")),
("x86_64", None, Some("elf")),
("aarch64", Some("ios"), Some("ios64")),
("aarch64", None, Some("linux64")),
("x86", Some(WINDOWS), Some("win32n")),
("x86", Some("ios"), Some("macosx")),
("x86", None, Some("elf")),
("arm", Some("ios"), Some("ios32")),
("arm", None, Some("linux32")),
("wasm32", None, None),
];
const WINDOWS: &str = "windows";
const MSVC: &str = "msvc";
const MSVC_OBJ_OPT: &str = "/Fo";
const MSVC_OBJ_EXT: &str = "obj";
/// Entry point: when invoked by Cargo as the `ring` crate's build script,
/// run the real build; otherwise (stand-alone invocation) regenerate the
/// pregenerated assembly artifacts.
fn main() {
    match std::env::var("CARGO_PKG_NAME") {
        Ok(ref name) if name == "ring" => ring_build_rs_main(),
        _ => pregenerate_asm_main(),
    }
}
/// Build-script path: gather the Cargo-provided target description, decide
/// whether to use pregenerated artifacts, and build the C/assembly code.
fn ring_build_rs_main() {
    use std::env;
    // Log the whole environment to stderr to ease build debugging.
    for (key, value) in env::vars() {
        eprintln!("ENV {}={}", key, value);
    }
    let out_dir = env::var("OUT_DIR").unwrap();
    let out_dir = PathBuf::from(out_dir);
    let arch = env::var("CARGO_CFG_TARGET_ARCH").unwrap();
    let os = env::var("CARGO_CFG_TARGET_OS").unwrap();
    let env = env::var("CARGO_CFG_TARGET_ENV").unwrap();
    // MSVC uses a different object-file extension and output-flag syntax.
    let (obj_ext, obj_opt) = if env == MSVC {
        (MSVC_OBJ_EXT, MSVC_OBJ_OPT)
    } else {
        ("o", "-o")
    };
    // A `.git` directory means we are building from a source checkout rather
    // than a published package; checkouts regenerate assembly themselves.
    let is_git = std::fs::metadata(".git").is_ok();
    // Published builds are always release builds.
    let is_debug = is_git && env::var("DEBUG").unwrap() != "false";
    let target = Target {
        arch,
        os,
        env,
        obj_ext,
        obj_opt,
        is_git,
        is_debug,
    };
    let pregenerated = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap()).join(PREGENERATED);
    build_c_code(&target, pregenerated, &out_dir);
    check_all_files_tracked()
}
/// Stand-alone path: regenerate the `pregenerated/` assembly (and, for
/// Windows, the assembled object files) for every supported target.
fn pregenerate_asm_main() {
    let pregenerated = PathBuf::from(PREGENERATED);
    std::fs::create_dir(&pregenerated).unwrap();
    let pregenerated_tmp = pregenerated.join("tmp");
    std::fs::create_dir(&pregenerated_tmp).unwrap();
    for &(target_arch, target_os, perlasm_format) in ASM_TARGETS {
        // For Windows, package pregenerated object files instead of
        // pregenerated assembly language source files, so that the user
        // doesn't need to install the assembler.
        let asm_dir = if target_os == Some(WINDOWS) {
            &pregenerated_tmp
        } else {
            &pregenerated
        };
        if let Some(perlasm_format) = perlasm_format {
            let perlasm_src_dsts =
                perlasm_src_dsts(&asm_dir, target_arch, target_os, perlasm_format);
            perlasm(&perlasm_src_dsts, target_arch, perlasm_format, None);
            if target_os == Some(WINDOWS) {
                let srcs = asm_srcs(perlasm_src_dsts);
                for src in srcs {
                    let src_path = PathBuf::from(src);
                    let obj_path = obj_path(&pregenerated, &src_path, MSVC_OBJ_EXT);
                    run_command(yasm(&src_path, target_arch, &obj_path));
                }
            }
        }
    }
}
/// Everything the build needs to know about the compilation target.
struct Target {
    // Values of CARGO_CFG_TARGET_{ARCH,OS,ENV}.
    arch: String,
    os: String,
    env: String,
    // Object-file extension ("o" or "obj") and compiler output flag.
    obj_ext: &'static str,
    obj_opt: &'static str,
    // True when building from a git checkout (regenerate assembly).
    is_git: bool,
    // True for debug builds of a checkout; published builds are release-only.
    is_debug: bool,
}
/// Generate (or reuse pregenerated) assembly for `target`, compile all C and
/// assembly sources into the `ring-core` and `ring-test` static libraries,
/// and emit the Cargo link-search directive.
fn build_c_code(target: &Target, pregenerated: PathBuf, out_dir: &Path) {
    #[cfg(not(feature = "wasm32_c"))]
    {
        // Without the wasm32_c feature there is nothing to compile for wasm.
        if &target.arch == "wasm32" {
            return;
        }
    }
    // Newest modification time among headers, perlasm includes and this build
    // script; anything older than this is considered stale.
    let includes_modified = RING_INCLUDES
        .iter()
        .chain(RING_BUILD_FILE.iter())
        .chain(RING_PERL_INCLUDES.iter())
        .map(|f| file_modified(Path::new(*f)))
        .max()
        .unwrap();
    fn is_none_or_equals<T>(opt: Option<T>, other: T) -> bool
    where
        T: PartialEq,
    {
        if let Some(value) = opt {
            value == other
        } else {
            true
        }
    }
    // First ASM_TARGETS entry matching (arch, os) decides the perlasm format.
    let (_, _, perlasm_format) = ASM_TARGETS
        .iter()
        .find(|entry| {
            let &(entry_arch, entry_os, _) = *entry;
            entry_arch == &target.arch && is_none_or_equals(entry_os, &target.os)
        })
        .unwrap();
    // Packaged (non-git) builds ship pregenerated assembly; git checkouts
    // regenerate it and treat warnings as errors.
    let use_pregenerated = !target.is_git;
    let warnings_are_errors = target.is_git;
    let asm_dir = if use_pregenerated {
        &pregenerated
    } else {
        out_dir
    };
    let asm_srcs = if let Some(perlasm_format) = perlasm_format {
        let perlasm_src_dsts =
            perlasm_src_dsts(asm_dir, &target.arch, Some(&target.os), perlasm_format);
        if !use_pregenerated {
            perlasm(
                &perlasm_src_dsts[..],
                &target.arch,
                perlasm_format,
                Some(includes_modified),
            );
        }
        let mut asm_srcs = asm_srcs(perlasm_src_dsts);
        // For Windows we also pregenerate the object files for non-Git builds so
        // the user doesn't need to install the assembler. On other platforms we
        // assume the C compiler also assembles.
        if use_pregenerated && &target.os == WINDOWS {
            // The pregenerated object files always use ".obj" as the extension,
            // even when the C/C++ compiler outputs files with the ".o" extension.
            asm_srcs = asm_srcs
                .iter()
                .map(|src| obj_path(&pregenerated, src.as_path(), "obj"))
                .collect::<Vec<_>>();
        }
        asm_srcs
    } else {
        Vec::new()
    };
    // Everything in RING_SRCS for this arch that is not a perlasm script.
    let core_srcs = sources_for_arch(&target.arch)
        .into_iter()
        .filter(|p| !is_perlasm(&p))
        .collect::<Vec<_>>();
    let test_srcs = RING_TEST_SRCS.iter().map(PathBuf::from).collect::<Vec<_>>();
    let libs = [
        ("ring-core", &core_srcs[..], &asm_srcs[..]),
        ("ring-test", &test_srcs[..], &[]),
    ];
    // XXX: Ideally, ring-test would only be built for `cargo test`, but Cargo
    // can't do that yet.
    libs.iter().for_each(|&(lib_name, srcs, additional_srcs)| {
        build_library(
            &target,
            &out_dir,
            lib_name,
            srcs,
            additional_srcs,
            warnings_are_errors,
            includes_modified,
        )
    });
    println!(
        "cargo:rustc-link-search=native={}",
        out_dir.to_str().expect("Invalid path")
    );
}
/// Compile each source file to an object file (skipping up-to-date ones) and
/// archive them into `lib<lib_name>.a`, then emit the link directive.
fn build_library(
    target: &Target,
    out_dir: &Path,
    lib_name: &str,
    srcs: &[PathBuf],
    additional_srcs: &[PathBuf],
    warnings_are_errors: bool,
    includes_modified: SystemTime,
) {
    // Compile all the (dirty) source files into object files.
    // MSVC cannot assemble GAS-style ".S" files; those targets use the
    // pregenerated objects instead.
    let objs = additional_srcs
        .into_iter()
        .chain(srcs.into_iter())
        .filter(|f| &target.env != "msvc" || f.extension().unwrap().to_str().unwrap() != "S")
        .map(|f| compile(f, target, warnings_are_errors, out_dir, includes_modified))
        .collect::<Vec<_>>();
    // Rebuild the library only if at least one object is newer than it.
    let lib_path = PathBuf::from(out_dir).join(format!("lib{}.a", lib_name));
    if objs
        .iter()
        .map(Path::new)
        .any(|p| need_run(&p, &lib_path, includes_modified))
    {
        let mut c = cc::Build::new();
        for f in LD_FLAGS {
            let _ = c.flag(&f);
        }
        match target.os.as_str() {
            "macos" => {
                let _ = c.flag("-fPIC");
                let _ = c.flag("-Wl,-dead_strip");
            }
            _ => {
                // Fix: dropped a redundant `.into()` on the string literal;
                // `flag` takes `&str` directly.
                let _ = c.flag("-Wl,--gc-sections");
            }
        }
        for o in objs {
            let _ = c.object(o);
        }
        // Link directive is printed below; suppress cc's own metadata.
        let _ = c.cargo_metadata(false);
        c.compile(
            lib_path
                .file_name()
                .and_then(|f| f.to_str())
                .expect("No filename"),
        );
    }
    // Link the library. This works even when the library doesn't need to be
    // rebuilt.
    println!("cargo:rustc-link-lib=static={}", lib_name);
}
/// Compile (or assemble) one source file into `out_dir` if it is stale,
/// returning the path of the resulting object file. Pregenerated ".obj"
/// files are passed through untouched.
fn compile(
    p: &Path,
    target: &Target,
    warnings_are_errors: bool,
    out_dir: &Path,
    includes_modified: SystemTime,
) -> String {
    let ext = p.extension().unwrap().to_str().unwrap();
    if ext == "obj" {
        p.to_str().expect("Invalid path").into()
    } else {
        // Fix: removed `out_dir.clone()` — cloning a `&Path` only copies the
        // reference; `join` works directly on the borrow.
        let mut out_path = out_dir.join(p.file_name().unwrap());
        assert!(out_path.set_extension(target.obj_ext));
        if need_run(&p, &out_path, includes_modified) {
            // ".asm" on Windows goes through yasm; everything else through cc.
            let cmd = if &target.os != WINDOWS || ext != "asm" {
                cc(p, ext, target, warnings_are_errors, &out_path)
            } else {
                yasm(p, &target.arch, &out_path)
            };
            run_command(cmd);
        }
        out_path.to_str().expect("Invalid path").into()
    }
}
/// Map a source path to its object-file path inside `out_dir`, swapping the
/// extension for `obj_ext`.
fn obj_path(out_dir: &Path, src: &Path, obj_ext: &str) -> PathBuf {
    // Fix: removed `out_dir.clone()` — cloning a `&Path` clones the
    // reference, not the path; `join` takes `&self` anyway.
    let mut out_path = out_dir.join(src.file_name().unwrap());
    assert!(out_path.set_extension(obj_ext));
    out_path
}
/// Build the compiler invocation for one C or assembly source file,
/// applying per-toolchain warning, debug-info and hardening flags.
fn cc(
    file: &Path,
    ext: &str,
    target: &Target,
    warnings_are_errors: bool,
    out_dir: &Path,
) -> Command {
    let mut c = cc::Build::new();
    let _ = c.include("include");
    match ext {
        // C files additionally get the C-only flags; ".S" is assembled as-is.
        "c" => {
            for f in c_flags(target) {
                let _ = c.flag(f);
            }
        }
        "S" => (),
        e => panic!("Unsupported file extension: {:?}", e),
    };
    for f in cpp_flags(target) {
        let _ = c.flag(&f);
    }
    // Stack protector is unsupported/meaningless on these targets.
    if &target.os != "none"
        && &target.os != "redox"
        && &target.os != "windows"
        && target.arch != "wasm32"
    {
        let _ = c.flag("-fstack-protector");
    }
    match (target.os.as_str(), target.env.as_str()) {
        // ``-gfull`` is required for Darwin's |-dead_strip|.
        ("macos", _) => {
            let _ = c.flag("-gfull");
        }
        (_, "msvc") => (),
        _ => {
            let _ = c.flag("-g3");
        }
    };
    if !target.is_debug {
        let _ = c.define("NDEBUG", None);
    }
    if &target.env == "msvc" {
        if std::env::var("OPT_LEVEL").unwrap() == "0" {
            let _ = c.flag("/Od"); // Disable optimization for debug builds.
            // run-time checking: (s)tack frame, (u)ninitialized variables
            let _ = c.flag("/RTCsu");
        } else {
            let _ = c.flag("/Ox"); // Enable full optimization.
        }
    }
    if (target.arch.as_str(), target.os.as_str()) == ("wasm32", "unknown") {
        let _ = c.flag("--no-standard-libraries");
    }
    if warnings_are_errors {
        let flag = if &target.env != "msvc" {
            "-Werror"
        } else {
            "/WX"
        };
        let _ = c.flag(flag);
    }
    if &target.env == "musl" {
        // Some platforms enable _FORTIFY_SOURCE by default, but musl
        // libc doesn't support it yet. See
        // http://wiki.musl-libc.org/wiki/Future_Ideas#Fortify
        // http://www.openwall.com/lists/musl/2015/02/04/3
        // http://www.openwall.com/lists/musl/2015/06/17/1
        let _ = c.flag("-U_FORTIFY_SOURCE");
    }
    // Turn the cc::Build into a raw Command and append -c plus the
    // toolchain-specific output-file option.
    let mut c = c.get_compiler().to_command();
    let _ = c
        .arg("-c")
        .arg(format!(
            "{}{}",
            target.obj_opt,
            out_dir.to_str().expect("Invalid path")
        ))
        .arg(file);
    c
}
/// Build the `yasm.exe` invocation that assembles `file` into `out_file`
/// for the given Windows architecture.
fn yasm(file: &Path, arch: &str, out_file: &Path) -> Command {
    let (oformat, machine) = match arch {
        "x86_64" => ("--oformat=win64", "--machine=amd64"),
        "x86" => ("--oformat=win32", "--machine=x86"),
        other => panic!("unsupported arch: {}", other),
    };
    let mut cmd = Command::new("yasm.exe");
    let _ = cmd
        .args(&["-X", "vc", "--dformat=cv8", oformat, machine, "-o"])
        .arg(out_file.to_str().expect("Invalid path"))
        .arg(file);
    cmd
}
/// Convenience wrapper: build a `Command` from a program name plus string
/// arguments and run it via `run_command`, panicking on failure.
fn run_command_with_args<S>(command_name: S, args: &[String])
where
    S: AsRef<std::ffi::OsStr> + Copy,
{
    let mut command = Command::new(command_name);
    command.args(args);
    run_command(command);
}
/// Log and run `cmd`, panicking if it cannot be spawned or exits non-zero.
fn run_command(mut cmd: Command) {
    eprintln!("running {:?}", cmd);
    match cmd.status() {
        Ok(status) if status.success() => (),
        Ok(_) => panic!("execution failed"),
        Err(e) => panic!("failed to execute [{:?}]: {}", cmd, e),
    }
}
/// All RING_SRCS entries that apply to `arch` (an empty arch list means
/// "every architecture").
fn sources_for_arch(arch: &str) -> Vec<PathBuf> {
    let mut srcs = Vec::new();
    for &(archs, src) in RING_SRCS {
        if archs.is_empty() || archs.contains(&arch) {
            srcs.push(PathBuf::from(src));
        }
    }
    srcs
}
/// Pair each perlasm script for `arch` with the assembly file it should
/// produce in `out_dir`, duplicating the SHA-512 scripts that are also run
/// a second time to generate the SHA-256 outputs.
fn perlasm_src_dsts(
    out_dir: &Path,
    arch: &str,
    os: Option<&str>,
    perlasm_format: &str,
) -> Vec<(PathBuf, PathBuf)> {
    let srcs = sources_for_arch(arch);
    let mut src_dsts = srcs
        .iter()
        .filter(|p| is_perlasm(p))
        .map(|src| (src.clone(), asm_path(out_dir, src, os, perlasm_format)))
        .collect::<Vec<_>>();
    // Some PerlAsm source files need to be run multiple times with different
    // output paths.
    {
        // Appease the borrow checker.
        let mut maybe_synthesize = |concrete, synthesized| {
            // If the concrete (SHA-512) script is in use for this arch, add a
            // second entry producing the synthesized (SHA-256) output name.
            let concrete_path = PathBuf::from(concrete);
            if srcs.contains(&concrete_path) {
                let synthesized_path = PathBuf::from(synthesized);
                src_dsts.push((
                    concrete_path,
                    asm_path(out_dir, &synthesized_path, os, perlasm_format),
                ))
            }
        };
        maybe_synthesize(SHA512_X86_64, SHA256_X86_64);
        maybe_synthesize(SHA512_ARMV8, SHA256_ARMV8);
    }
    src_dsts
}
/// Extract just the generated-assembly destination paths from the
/// (script, output) pairs.
fn asm_srcs(perlasm_src_dsts: Vec<(PathBuf, PathBuf)>) -> Vec<PathBuf> {
    let mut dsts = Vec::with_capacity(perlasm_src_dsts.len());
    for (_src, dst) in perlasm_src_dsts {
        dsts.push(dst);
    }
    dsts
}
/// True when `path` is a PerlAsm script (".pl" extension).
/// Robustness fix: paths without an extension (or with non-UTF-8 extensions)
/// now return `false` instead of panicking on `unwrap()`.
fn is_perlasm(path: &PathBuf) -> bool {
    path.extension().and_then(|ext| ext.to_str()) == Some("pl")
}
/// Destination path for generated assembly: `<stem>-<format>.S` (or `.asm`
/// on Windows) inside `out_dir`.
fn asm_path(out_dir: &Path, src: &Path, os: Option<&str>, perlasm_format: &str) -> PathBuf {
    let stem = src
        .file_stem()
        .expect("source file without basename")
        .to_str()
        .unwrap();
    let extension = match os {
        Some("windows") => "asm",
        _ => "S",
    };
    out_dir.join(format!("{}-{}.{}", stem, perlasm_format, extension))
}
/// Run perl on each (script, output) pair that is stale, passing the target
/// assembly format and architecture-specific defines.
fn perlasm(
    src_dst: &[(PathBuf, PathBuf)],
    arch: &str,
    perlasm_format: &str,
    includes_modified: Option<SystemTime>,
) {
    for (src, dst) in src_dst {
        // `None` (pregeneration mode) regenerates unconditionally.
        if let Some(includes_modified) = includes_modified {
            if !need_run(src, dst, includes_modified) {
                continue;
            }
        }
        let mut args = Vec::<String>::new();
        args.push(src.to_string_lossy().into_owned());
        args.push(perlasm_format.to_owned());
        if arch == "x86" {
            args.push("-fPIC".into());
            args.push("-DOPENSSL_IA32_SSE2".into());
        }
        // Work around PerlAsm issue for ARM and AAarch64 targets by replacing
        // back slashes with forward slashes.
        let dst = dst
            .to_str()
            .expect("Could not convert path")
            .replace("\\", "/");
        args.push(dst);
        run_command_with_args(&get_command("PERL_EXECUTABLE", "perl"), &args);
    }
}
fn need_run(source: &Path, target: &Path, includes_modified: SystemTime) -> bool {
let s_modified = file_modified(source);
if let Ok(target_metadata) = std::fs::metadata(target) {
let target_modified = target_metadata.modified().unwrap();
s_modified >= target_modified || includes_modified >= target_modified
} else {
// On error fetching metadata for the target file, assume the target
// doesn't exist.
true
}
}
fn file_modified(path: &Path) -> SystemTime {
let path = Path::new(path);
let path_as_str = format!("{:?}", path);
std::fs::metadata(path)
.expect(&path_as_str)
.modified()
.expect("nah")
}
fn get_command(var: &str, default: &str) -> String {
std::env::var(var).unwrap_or(default.into())
}
fn check_all_files_tracked() {
for path in &["crypto", "include", "third_party/fiat"] {
walk_dir(&PathBuf::from(path), &is_tracked);
}
}
fn is_tracked(file: &DirEntry) {
let p = file.path();
let cmp = |f| p == PathBuf::from(f);
let tracked = match p.extension().and_then(|p| p.to_str()) {
Some("h") | Some("inl") => RING_INCLUDES.iter().any(cmp),
Some("c") | Some("S") | Some("asm") => {
RING_SRCS.iter().any(|(_, f)| cmp(f)) || RING_TEST_SRCS.iter().any(cmp)
}
Some("pl") => RING_SRCS.iter().any(|(_, f)| cmp(f)) || RING_PERL_INCLUDES.iter().any(cmp),
_ => true,
};
if !tracked {
panic!("{:?} is not tracked in build.rs", p);
}
}
fn walk_dir<F>(dir: &Path, cb: &F)
where
F: Fn(&DirEntry),
{
if dir.is_dir() {
for entry in fs::read_dir(dir).unwrap() {
let entry = entry.unwrap();
let path = entry.path();
if path.is_dir() {
walk_dir(&path, cb);
} else {
cb(&entry);
}
}
}
}
| 31.366505 | 98 | 0.563453 |
4ac5c667c30e26b3ef3fa0f0e7d6de830dce353d | 11,620 | use crate::codec::{BackendMessage, BackendMessages, FrontendMessage, PostgresCodec};
use crate::config::{self, Config};
use crate::connect_tls::connect_tls;
use crate::maybe_tls_stream::MaybeTlsStream;
use crate::tls::{ChannelBinding, TlsConnect};
use crate::{Client, Connection, Error};
use fallible_iterator::FallibleIterator;
use futures::channel::mpsc;
use futures::{ready, Sink, SinkExt, Stream, TryStreamExt};
use postgres_protocol::authentication;
use postgres_protocol::authentication::sasl;
use postgres_protocol::authentication::sasl::ScramSha256;
use postgres_protocol::message::backend::{AuthenticationSaslBody, Message};
use postgres_protocol::message::frontend;
use std::collections::HashMap;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::codec::Framed;
use tokio::io::{AsyncRead, AsyncWrite};
pub struct StartupStream<S, T> {
inner: Framed<MaybeTlsStream<S, T>, PostgresCodec>,
buf: BackendMessages,
}
impl<S, T> Sink<FrontendMessage> for StartupStream<S, T>
where
S: AsyncRead + AsyncWrite + Unpin,
T: AsyncRead + AsyncWrite + Unpin,
{
type Error = io::Error;
fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.inner).poll_ready(cx)
}
fn start_send(mut self: Pin<&mut Self>, item: FrontendMessage) -> io::Result<()> {
Pin::new(&mut self.inner).start_send(item)
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.inner).poll_flush(cx)
}
fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.inner).poll_close(cx)
}
}
impl<S, T> Stream for StartupStream<S, T>
where
S: AsyncRead + AsyncWrite + Unpin,
T: AsyncRead + AsyncWrite + Unpin,
{
type Item = io::Result<Message>;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<io::Result<Message>>> {
loop {
match self.buf.next() {
Ok(Some(message)) => return Poll::Ready(Some(Ok(message))),
Ok(None) => {}
Err(e) => return Poll::Ready(Some(Err(e))),
}
match ready!(Pin::new(&mut self.inner).poll_next(cx)) {
Some(Ok(BackendMessage::Normal { messages, .. })) => self.buf = messages,
Some(Ok(BackendMessage::Async(message))) => return Poll::Ready(Some(Ok(message))),
Some(Err(e)) => return Poll::Ready(Some(Err(e))),
None => return Poll::Ready(None),
}
}
}
}
pub async fn connect_raw<S, T>(
stream: S,
tls: T,
config: &Config,
) -> Result<(Client, Connection<S, T::Stream>), Error>
where
S: AsyncRead + AsyncWrite + Unpin,
T: TlsConnect<S>,
{
let (stream, channel_binding) = connect_tls(stream, config.ssl_mode, tls).await?;
let mut stream = StartupStream {
inner: Framed::new(stream, PostgresCodec),
buf: BackendMessages::empty(),
};
startup(&mut stream, config).await?;
authenticate(&mut stream, channel_binding, config).await?;
let (process_id, secret_key, parameters) = read_info(&mut stream).await?;
let (sender, receiver) = mpsc::unbounded();
let client = Client::new(sender, config.ssl_mode, process_id, secret_key);
let connection = Connection::new(stream.inner, parameters, receiver);
Ok((client, connection))
}
async fn startup<S, T>(stream: &mut StartupStream<S, T>, config: &Config) -> Result<(), Error>
where
S: AsyncRead + AsyncWrite + Unpin,
T: AsyncRead + AsyncWrite + Unpin,
{
let mut params = vec![("client_encoding", "UTF8"), ("timezone", "GMT")];
if let Some(user) = &config.user {
params.push(("user", &**user));
}
if let Some(dbname) = &config.dbname {
params.push(("database", &**dbname));
}
if let Some(options) = &config.options {
params.push(("options", &**options));
}
if let Some(application_name) = &config.application_name {
params.push(("application_name", &**application_name));
}
let mut buf = vec![];
frontend::startup_message(params, &mut buf).map_err(Error::encode)?;
stream
.send(FrontendMessage::Raw(buf))
.await
.map_err(Error::io)
}
async fn authenticate<S, T>(
stream: &mut StartupStream<S, T>,
channel_binding: ChannelBinding,
config: &Config,
) -> Result<(), Error>
where
S: AsyncRead + AsyncWrite + Unpin,
T: AsyncRead + AsyncWrite + Unpin,
{
match stream.try_next().await.map_err(Error::io)? {
Some(Message::AuthenticationOk) => {
can_skip_channel_binding(config)?;
return Ok(());
}
Some(Message::AuthenticationCleartextPassword) => {
can_skip_channel_binding(config)?;
let pass = config
.password
.as_ref()
.ok_or_else(|| Error::config("password missing".into()))?;
authenticate_password(stream, pass).await?;
}
Some(Message::AuthenticationMd5Password(body)) => {
can_skip_channel_binding(config)?;
let user = config
.user
.as_ref()
.ok_or_else(|| Error::config("user missing".into()))?;
let pass = config
.password
.as_ref()
.ok_or_else(|| Error::config("password missing".into()))?;
let output = authentication::md5_hash(user.as_bytes(), pass, body.salt());
authenticate_password(stream, output.as_bytes()).await?;
}
Some(Message::AuthenticationSasl(body)) => {
authenticate_sasl(stream, body, channel_binding, config).await?;
}
Some(Message::AuthenticationKerberosV5)
| Some(Message::AuthenticationScmCredential)
| Some(Message::AuthenticationGss)
| Some(Message::AuthenticationSspi) => {
return Err(Error::authentication(
"unsupported authentication method".into(),
))
}
Some(Message::ErrorResponse(body)) => return Err(Error::db(body)),
Some(_) => return Err(Error::unexpected_message()),
None => return Err(Error::closed()),
}
match stream.try_next().await.map_err(Error::io)? {
Some(Message::AuthenticationOk) => Ok(()),
Some(Message::ErrorResponse(body)) => Err(Error::db(body)),
Some(_) => Err(Error::unexpected_message()),
None => Err(Error::closed()),
}
}
fn can_skip_channel_binding(config: &Config) -> Result<(), Error> {
match config.channel_binding {
config::ChannelBinding::Disable | config::ChannelBinding::Prefer => Ok(()),
config::ChannelBinding::Require => Err(Error::authentication(
"server did not use channel binding".into(),
)),
config::ChannelBinding::__NonExhaustive => unreachable!(),
}
}
async fn authenticate_password<S, T>(
stream: &mut StartupStream<S, T>,
password: &[u8],
) -> Result<(), Error>
where
S: AsyncRead + AsyncWrite + Unpin,
T: AsyncRead + AsyncWrite + Unpin,
{
let mut buf = vec![];
frontend::password_message(password, &mut buf).map_err(Error::encode)?;
stream
.send(FrontendMessage::Raw(buf))
.await
.map_err(Error::io)
}
async fn authenticate_sasl<S, T>(
stream: &mut StartupStream<S, T>,
body: AuthenticationSaslBody,
channel_binding: ChannelBinding,
config: &Config,
) -> Result<(), Error>
where
S: AsyncRead + AsyncWrite + Unpin,
T: AsyncRead + AsyncWrite + Unpin,
{
let password = config
.password
.as_ref()
.ok_or_else(|| Error::config("password missing".into()))?;
let mut has_scram = false;
let mut has_scram_plus = false;
let mut mechanisms = body.mechanisms();
while let Some(mechanism) = mechanisms.next().map_err(Error::parse)? {
match mechanism {
sasl::SCRAM_SHA_256 => has_scram = true,
sasl::SCRAM_SHA_256_PLUS => has_scram_plus = true,
_ => {}
}
}
let channel_binding = channel_binding
.tls_server_end_point
.filter(|_| config.channel_binding != config::ChannelBinding::Disable)
.map(sasl::ChannelBinding::tls_server_end_point);
let (channel_binding, mechanism) = if has_scram_plus {
match channel_binding {
Some(channel_binding) => (channel_binding, sasl::SCRAM_SHA_256_PLUS),
None => {
(sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256)
},
}
} else if has_scram {
match channel_binding {
Some(_) => (sasl::ChannelBinding::unrequested(), sasl::SCRAM_SHA_256),
None => (sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256),
}
} else {
return Err(Error::authentication("unsupported SASL mechanism".into()));
};
if mechanism != sasl::SCRAM_SHA_256_PLUS {
can_skip_channel_binding(config)?;
}
let mut scram = ScramSha256::new(password, channel_binding);
let mut buf = vec![];
frontend::sasl_initial_response(mechanism, scram.message(), &mut buf).map_err(Error::encode)?;
stream
.send(FrontendMessage::Raw(buf))
.await
.map_err(Error::io)?;
let body = match stream.try_next().await.map_err(Error::io)? {
Some(Message::AuthenticationSaslContinue(body)) => body,
Some(Message::ErrorResponse(body)) => return Err(Error::db(body)),
Some(_) => return Err(Error::unexpected_message()),
None => return Err(Error::closed()),
};
scram
.update(body.data())
.map_err(|e| Error::authentication(e.into()))?;
let mut buf = vec![];
frontend::sasl_response(scram.message(), &mut buf).map_err(Error::encode)?;
stream
.send(FrontendMessage::Raw(buf))
.await
.map_err(Error::io)?;
let body = match stream.try_next().await.map_err(Error::io)? {
Some(Message::AuthenticationSaslFinal(body)) => body,
Some(Message::ErrorResponse(body)) => return Err(Error::db(body)),
Some(_) => return Err(Error::unexpected_message()),
None => return Err(Error::closed()),
};
scram
.finish(body.data())
.map_err(|e| Error::authentication(e.into()))?;
Ok(())
}
async fn read_info<S, T>(
stream: &mut StartupStream<S, T>,
) -> Result<(i32, i32, HashMap<String, String>), Error>
where
S: AsyncRead + AsyncWrite + Unpin,
T: AsyncRead + AsyncWrite + Unpin,
{
let mut process_id = 0;
let mut secret_key = 0;
let mut parameters = HashMap::new();
loop {
match stream.try_next().await.map_err(Error::io)? {
Some(Message::BackendKeyData(body)) => {
process_id = body.process_id();
secret_key = body.secret_key();
}
Some(Message::ParameterStatus(body)) => {
parameters.insert(
body.name().map_err(Error::parse)?.to_string(),
body.value().map_err(Error::parse)?.to_string(),
);
}
Some(Message::NoticeResponse(_)) => {}
Some(Message::ReadyForQuery(_)) => return Ok((process_id, secret_key, parameters)),
Some(Message::ErrorResponse(body)) => return Err(Error::db(body)),
Some(_) => return Err(Error::unexpected_message()),
None => return Err(Error::closed()),
}
}
}
| 33.487032 | 98 | 0.601033 |
bf540b0428d95ba85959fc97b5b47a0e0312dfa0 | 16,785 | // Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
extern crate tempdir;
use bazel_protos;
use boxfuture::{Boxable, BoxFuture};
use bytes::Bytes;
use futures::Future;
use futures::future::join_all;
use hashing::{Digest, Fingerprint};
use itertools::Itertools;
use {File, FileContent, PathStat, Store};
use protobuf;
use std::collections::HashMap;
use std::ffi::OsString;
use std::fmt;
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
const EMPTY_FINGERPRINT: Fingerprint = Fingerprint(
[
0xe3,
0xb0,
0xc4,
0x42,
0x98,
0xfc,
0x1c,
0x14,
0x9a,
0xfb,
0xf4,
0xc8,
0x99,
0x6f,
0xb9,
0x24,
0x27,
0xae,
0x41,
0xe4,
0x64,
0x9b,
0x93,
0x4c,
0xa4,
0x95,
0x99,
0x1b,
0x78,
0x52,
0xb8,
0x55,
],
);
pub const EMPTY_DIGEST: Digest = Digest(EMPTY_FINGERPRINT, 0);
#[derive(Clone, Eq, Hash, PartialEq)]
pub struct Snapshot {
pub digest: Digest,
pub path_stats: Vec<PathStat>,
}
// StoreFileByDigest allows a File to be saved to an underlying Store, in such a way that it can be
// looked up by the Digest produced by the store_by_digest method.
// It is a separate trait so that caching implementations can be written which wrap the Store (used
// to store the bytes) and VFS (used to read the files off disk if needed).
pub trait StoreFileByDigest<Error> {
fn store_by_digest(&self, file: &File) -> BoxFuture<Digest, Error>;
}
impl Snapshot {
pub fn empty() -> Snapshot {
Snapshot {
digest: EMPTY_DIGEST,
path_stats: vec![],
}
}
pub fn from_path_stats<
S: StoreFileByDigest<Error> + Sized + Clone,
Error: fmt::Debug + 'static + Send,
>(
store: Store,
file_digester: S,
path_stats: Vec<PathStat>,
) -> BoxFuture<Snapshot, String> {
let mut sorted_path_stats = path_stats.clone();
sorted_path_stats.sort_by(|a, b| a.path().cmp(b.path()));
Snapshot::ingest_directory_from_sorted_path_stats(store, file_digester, sorted_path_stats)
.map(|digest| Snapshot { digest, path_stats })
.to_boxed()
}
fn ingest_directory_from_sorted_path_stats<
S: StoreFileByDigest<Error> + Sized + Clone,
Error: fmt::Debug + 'static + Send,
>(
store: Store,
file_digester: S,
path_stats: Vec<PathStat>,
) -> BoxFuture<Digest, String> {
let mut file_futures: Vec<BoxFuture<bazel_protos::remote_execution::FileNode, String>> =
Vec::new();
let mut dir_futures: Vec<BoxFuture<bazel_protos::remote_execution::DirectoryNode, String>> =
Vec::new();
for (first_component, group) in
&path_stats.iter().cloned().group_by(|s| {
s.path().components().next().unwrap().as_os_str().to_owned()
})
{
let mut path_group: Vec<PathStat> = group.collect();
if path_group.len() == 1 && path_group.get(0).unwrap().path().components().count() == 1 {
// Exactly one entry with exactly one component indicates either a file in this directory,
// or an empty directory.
// If the child is a non-empty directory, or a file therein, there must be multiple
// PathStats with that prefix component, and we will handle that in the recursive
// save_directory call.
match path_group.pop().unwrap() {
PathStat::File { ref stat, .. } => {
let is_executable = stat.is_executable;
file_futures.push(
file_digester
.clone()
.store_by_digest(&stat)
.map_err(|e| format!("{:?}", e))
.and_then(move |digest| {
let mut file_node = bazel_protos::remote_execution::FileNode::new();
file_node.set_name(osstring_as_utf8(first_component)?);
file_node.set_digest((&digest).into());
file_node.set_is_executable(is_executable);
Ok(file_node)
})
.to_boxed(),
);
}
PathStat::Dir { .. } => {
// Because there are no children of this Dir, it must be empty.
dir_futures.push(
store
.record_directory(&bazel_protos::remote_execution::Directory::new(), true)
.map(move |digest| {
let mut directory_node = bazel_protos::remote_execution::DirectoryNode::new();
directory_node.set_name(osstring_as_utf8(first_component).unwrap());
directory_node.set_digest((&digest).into());
directory_node
})
.to_boxed(),
);
}
}
} else {
dir_futures.push(
// TODO: Memoize this in the graph
Snapshot::ingest_directory_from_sorted_path_stats(
store.clone(),
file_digester.clone(),
paths_of_child_dir(path_group),
).and_then(move |digest| {
let mut dir_node = bazel_protos::remote_execution::DirectoryNode::new();
dir_node.set_name(osstring_as_utf8(first_component)?);
dir_node.set_digest((&digest).into());
Ok(dir_node)
})
.to_boxed(),
);
}
}
join_all(dir_futures)
.join(join_all(file_futures))
.and_then(move |(dirs, files)| {
let mut directory = bazel_protos::remote_execution::Directory::new();
directory.set_directories(protobuf::RepeatedField::from_vec(dirs));
directory.set_files(protobuf::RepeatedField::from_vec(files));
store.record_directory(&directory, true)
})
.to_boxed()
}
// Preserves the order of Snapshot's path_stats in its returned Vec.
pub fn contents(self, store: Store) -> BoxFuture<Vec<FileContent>, String> {
let contents = Arc::new(Mutex::new(HashMap::new()));
let path_stats = self.path_stats;
Snapshot::contents_for_directory_helper(self.digest, store, PathBuf::from(""), contents.clone())
.map(move |_| {
let mut contents = contents.lock().unwrap();
let mut vec = Vec::new();
for path in path_stats.iter().filter_map(|path_stat| match path_stat {
&PathStat::File { ref path, .. } => Some(path.to_path_buf()),
&PathStat::Dir { .. } => None,
})
{
match contents.remove(&path) {
Some(content) => vec.push(FileContent { path, content }),
None => {
panic!(format!(
"PathStat for {:?} was present in path_stats but missing from Snapshot contents",
path
));
}
}
}
vec
})
.to_boxed()
}
// Assumes that all fingerprints it encounters are valid.
fn contents_for_directory_helper(
digest: Digest,
store: Store,
path_so_far: PathBuf,
contents_wrapped: Arc<Mutex<HashMap<PathBuf, Bytes>>>,
) -> BoxFuture<(), String> {
store
.load_directory(digest)
.and_then(move |maybe_dir| {
maybe_dir.ok_or_else(|| {
format!("Could not find directory with digest {:?}", digest)
})
})
.and_then(move |dir| {
let contents_wrapped_copy = contents_wrapped.clone();
let path_so_far_copy = path_so_far.clone();
let store_copy = store.clone();
let file_futures = join_all(
dir
.get_files()
.iter()
.map(move |file_node| {
let path = path_so_far_copy.join(file_node.get_name());
let contents_wrapped_copy = contents_wrapped_copy.clone();
store_copy
.load_file_bytes_with(file_node.get_digest().into(), |b| b)
.and_then(move |maybe_bytes| {
maybe_bytes
.ok_or_else(|| format!("Couldn't find file contents for {:?}", path))
.map(move |bytes| {
let mut contents = contents_wrapped_copy.lock().unwrap();
contents.insert(path, bytes);
})
})
})
.collect::<Vec<_>>(),
);
let contents_wrapped_copy2 = contents_wrapped.clone();
let store_copy = store.clone();
let dir_futures = join_all(
dir
.get_directories()
.iter()
.map(move |dir_node| {
Snapshot::contents_for_directory_helper(
dir_node.get_digest().into(),
store_copy.clone(),
path_so_far.join(dir_node.get_name()),
contents_wrapped_copy2.clone(),
)
})
.collect::<Vec<_>>(),
);
file_futures.join(dir_futures)
})
.map(|(_, _)| ())
.to_boxed()
}
}
impl fmt::Debug for Snapshot {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"Snapshot(digest={:?}, entries={})",
self.digest,
self.path_stats.len()
)
}
}
fn paths_of_child_dir(paths: Vec<PathStat>) -> Vec<PathStat> {
paths
.into_iter()
.filter_map(|s| {
if s.path().components().count() == 1 {
return None;
}
Some(match s {
PathStat::File { path, stat } => {
PathStat::File {
path: path.iter().skip(1).collect(),
stat: stat,
}
}
PathStat::Dir { path, stat } => {
PathStat::Dir {
path: path.iter().skip(1).collect(),
stat: stat,
}
}
})
})
.collect()
}
fn osstring_as_utf8(path: OsString) -> Result<String, String> {
path.into_string().map_err(|p| {
format!("{:?}'s file_name is not representable in UTF8", p)
})
}
#[cfg(test)]
mod tests {
extern crate testutil;
extern crate tempdir;
use boxfuture::{BoxFuture, Boxable};
use bytes::Bytes;
use futures::future::Future;
use hashing::{Digest, Fingerprint};
use tempdir::TempDir;
use self::testutil::make_file;
use super::super::{File, FileContent, Path, PathGlobs, PathStat, PosixFS, ResettablePool,
Snapshot, Store, StoreFileByDigest, VFS};
use std;
use std::error::Error;
use std::path::PathBuf;
use std::sync::Arc;
const AGGRESSIVE: &str = "Aggressive";
const LATIN: &str = "Chaetophractus villosus";
const STR: &str = "European Burmese";
fn setup() -> (Store, TempDir, Arc<PosixFS>, FileSaver) {
let pool = Arc::new(ResettablePool::new("test-pool-".to_string()));
// TODO: Pass a remote CAS address through.
let store = Store::local_only(TempDir::new("lmdb_store").unwrap(), pool.clone()).unwrap();
let dir = TempDir::new("root").unwrap();
let posix_fs = Arc::new(PosixFS::new(dir.path(), pool, vec![]).unwrap());
let file_saver = FileSaver(store.clone(), posix_fs.clone());
(store, dir, posix_fs, file_saver)
}
#[test]
fn snapshot_one_file() {
let (store, dir, posix_fs, digester) = setup();
let file_name = PathBuf::from("roland");
make_file(&dir.path().join(&file_name), STR.as_bytes(), 0o600);
let path_stats = expand_all_sorted(posix_fs);
assert_eq!(
Snapshot::from_path_stats(store, digester, path_stats.clone())
.wait()
.unwrap(),
Snapshot {
digest: Digest(
Fingerprint::from_hex_string(
"63949aa823baf765eff07b946050d76ec0033144c785a94d3ebd82baa931cd16",
).unwrap(),
80,
),
path_stats: path_stats,
}
);
}
#[test]
fn snapshot_recursive_directories() {
let (store, dir, posix_fs, digester) = setup();
let cats = PathBuf::from("cats");
let roland = cats.join("roland");
std::fs::create_dir_all(&dir.path().join(cats)).unwrap();
make_file(&dir.path().join(&roland), STR.as_bytes(), 0o600);
let path_stats = expand_all_sorted(posix_fs);
assert_eq!(
Snapshot::from_path_stats(store, digester, path_stats.clone())
.wait()
.unwrap(),
Snapshot {
digest: Digest(
Fingerprint::from_hex_string(
"8b1a7ea04eaa2527b35683edac088bc826117b53b7ec6601740b55e20bce3deb",
).unwrap(),
78,
),
path_stats: path_stats,
}
);
}
#[test]
fn snapshot_recursive_directories_including_empty() {
let (store, dir, posix_fs, digester) = setup();
let cats = PathBuf::from("cats");
let roland = cats.join("roland");
let dogs = PathBuf::from("dogs");
let llamas = PathBuf::from("llamas");
std::fs::create_dir_all(&dir.path().join(&cats)).unwrap();
std::fs::create_dir_all(&dir.path().join(&dogs)).unwrap();
std::fs::create_dir_all(&dir.path().join(&llamas)).unwrap();
make_file(&dir.path().join(&roland), STR.as_bytes(), 0o600);
let sorted_path_stats = expand_all_sorted(posix_fs);
let mut unsorted_path_stats = sorted_path_stats.clone();
unsorted_path_stats.reverse();
assert_eq!(
Snapshot::from_path_stats(store, digester, unsorted_path_stats.clone())
.wait()
.unwrap(),
Snapshot {
digest: Digest(
Fingerprint::from_hex_string(
"fbff703bdaac62accf2ea5083bcfed89292073bf710ef9ad14d9298c637e777b",
).unwrap(),
232,
),
path_stats: unsorted_path_stats,
}
);
}
#[test]
fn contents_for_one_file() {
let (store, dir, posix_fs, digester) = setup();
let file_name = PathBuf::from("roland");
make_file(&dir.path().join(&file_name), STR.as_bytes(), 0o600);
let contents = Snapshot::from_path_stats(store.clone(), digester, expand_all_sorted(posix_fs))
.wait()
.unwrap()
.contents(store)
.wait()
.unwrap();
assert_snapshot_contents(contents, vec![(&file_name, STR)]);
}
#[test]
fn contents_for_files_in_multiple_directories() {
let (store, dir, posix_fs, digester) = setup();
let armadillos = PathBuf::from("armadillos");
let armadillos_abs = dir.path().join(&armadillos);
std::fs::create_dir_all(&armadillos_abs).unwrap();
let amy = armadillos.join("amy");
make_file(&dir.path().join(&amy), LATIN.as_bytes(), 0o600);
let rolex = armadillos.join("rolex");
make_file(&dir.path().join(&rolex), AGGRESSIVE.as_bytes(), 0o600);
let cats = PathBuf::from("cats");
let cats_abs = dir.path().join(&cats);
std::fs::create_dir_all(&cats_abs).unwrap();
let roland = cats.join("roland");
make_file(&dir.path().join(&roland), STR.as_bytes(), 0o600);
let dogs = PathBuf::from("dogs");
let dogs_abs = dir.path().join(&dogs);
std::fs::create_dir_all(&dogs_abs).unwrap();
let path_stats_sorted = expand_all_sorted(posix_fs);
let mut path_stats_reversed = path_stats_sorted.clone();
path_stats_reversed.reverse();
{
let snapshot =
Snapshot::from_path_stats(store.clone(), digester.clone(), path_stats_reversed)
.wait()
.unwrap();
let contents = snapshot.contents(store.clone()).wait().unwrap();
assert_snapshot_contents(
contents,
vec![(&roland, STR), (&rolex, AGGRESSIVE), (&amy, LATIN)],
);
}
{
let contents = Snapshot::from_path_stats(store.clone(), digester, path_stats_sorted)
.wait()
.unwrap()
.contents(store)
.wait()
.unwrap();
assert_snapshot_contents(
contents,
vec![(&amy, LATIN), (&rolex, AGGRESSIVE), (&roland, STR)],
);
}
}
#[derive(Clone)]
struct FileSaver(Store, Arc<PosixFS>);
impl StoreFileByDigest<String> for FileSaver {
fn store_by_digest(&self, file: &File) -> BoxFuture<Digest, String> {
let file_copy = file.clone();
let store = self.0.clone();
self
.1
.clone()
.read_file(&file)
.map_err(move |err| {
format!("Error reading file {:?}: {}", file_copy, err.description())
})
.and_then(move |content| store.store_file_bytes(content.content, true))
.to_boxed()
}
}
fn expand_all_sorted(posix_fs: Arc<PosixFS>) -> Vec<PathStat> {
let mut v = posix_fs
.expand(PathGlobs::create(&["**".to_owned()], &vec![]).unwrap())
.wait()
.unwrap();
v.sort_by(|a, b| a.path().cmp(b.path()));
v
}
fn assert_snapshot_contents(contents: Vec<FileContent>, expected: Vec<(&Path, &str)>) {
let expected_with_array: Vec<_> = expected
.into_iter()
.map(|(path, s)| (path.to_path_buf(), Bytes::from(s)))
.collect();
let got: Vec<_> = contents
.into_iter()
.map(|file_content| (file_content.path, file_content.content))
.collect();
assert_eq!(expected_with_array, got);
}
}
| 31.315299 | 100 | 0.589693 |
268618c5d8d9ea985595894ac7812e2c608b7664 | 851 | use windows_dll::{dll, flags::*};
use winapi::shared::{
minwindef::{BOOL, ULONG},
ntdef::LPCWSTR,
};
#[dll("bcrypt", LOAD_LIBRARY_SEARCH_SYSTEM32)]
extern "system" {
#[link_name = "BCryptAddContextFunction"]
fn bcrypt_add_context_function(
dw_table: ULONG,
psz_context: LPCWSTR,
dw_interface: ULONG,
psz_function: LPCWSTR,
dw_position: ULONG,
) -> BOOL;
}
#[dll("firewallapi.dll", LOAD_LIBRARY_SEARCH_APPLICATION_DIR)]
extern "system" {
#[link_name = "FWAddFirewallRule"]
pub fn fw_add_firewall_rule() -> ();
}
#[test]
fn assert_args_passed() {
assert!(
bcrypt_add_context_function::exists(),
"Didn't find bcrypt.dll in system dir..."
);
assert!(
!fw_add_firewall_rule::exists(),
"Found firewallapi.dll in application dir..."
);
}
| 23 | 62 | 0.633373 |
914d17fb4146f771ddbe48314822364bb627fb2c | 93 |
// snippet::fn_1
fn sample_fn_1() {
}
// end::fn_1
// snippet::fn_2
fn sample_fn_2() {
}
| 7.75 | 18 | 0.591398 |
33f7d37591ae65486f3cf524649eb49baa1a0cde | 3,230 | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Common utilities for pseudo file implementations
use {
fidl_fuchsia_io::{
MODE_PROTECTION_MASK, MODE_TYPE_DIRECTORY, MODE_TYPE_FILE, OPEN_FLAG_APPEND,
OPEN_FLAG_DESCRIBE, OPEN_FLAG_DIRECTORY, OPEN_FLAG_NODE_REFERENCE, OPEN_FLAG_NOT_DIRECTORY,
OPEN_FLAG_POSIX, OPEN_FLAG_POSIX_EXECUTABLE, OPEN_FLAG_POSIX_WRITABLE, OPEN_FLAG_TRUNCATE,
OPEN_RIGHT_READABLE, OPEN_RIGHT_WRITABLE,
},
fuchsia_zircon::Status,
};
/// Validate that the requested flags for a new connection are valid. This validates flags against
/// the flags of the parent connection, as well as whether or not the pseudo file is readable or
/// writable at all (e.g. if an on_read or on_write function was provided, respectively). On success,
/// it returns the validated flags, with some ambiguities cleaned up. On failure, it returns a
/// [`Status`] indicating the problem.
///
/// Changing this function can be dangerous! Not only does it have obvious security implications, but
/// connections currently rely on it to reject unsupported functionality, such as attempting to read
/// from a file when `on_read` is `None`.
pub fn new_connection_validate_flags(
mut flags: u32,
mode: u32,
readable: bool,
writable: bool,
) -> Result<u32, Status> {
// There should be no MODE_TYPE_* flags set, except for, possibly, MODE_TYPE_FILE when the
// target is a pseudo file.
if (mode & !MODE_PROTECTION_MASK) & !MODE_TYPE_FILE != 0 {
if (mode & !MODE_PROTECTION_MASK) & MODE_TYPE_DIRECTORY != 0 {
return Err(Status::NOT_DIR);
} else {
return Err(Status::INVALID_ARGS);
};
}
if flags & OPEN_FLAG_NODE_REFERENCE != 0 {
flags &= !OPEN_FLAG_NODE_REFERENCE;
flags &= OPEN_FLAG_DIRECTORY | OPEN_FLAG_DESCRIBE;
}
if flags & OPEN_FLAG_DIRECTORY != 0 {
return Err(Status::NOT_DIR);
}
if flags & OPEN_FLAG_NOT_DIRECTORY != 0 {
flags &= !OPEN_FLAG_NOT_DIRECTORY;
}
// For files all OPEN_FLAG_POSIX flags are ignored as they have meaning only for directories.
flags &= !(OPEN_FLAG_POSIX | OPEN_FLAG_POSIX_WRITABLE | OPEN_FLAG_POSIX_EXECUTABLE);
let allowed_flags = OPEN_FLAG_DESCRIBE
| if readable { OPEN_RIGHT_READABLE } else { 0 }
| if writable { OPEN_RIGHT_WRITABLE | OPEN_FLAG_TRUNCATE } else { 0 };
let prohibited_flags = (0 | if readable {
OPEN_FLAG_TRUNCATE
} else {
0
} | if writable {
OPEN_FLAG_APPEND
} else {
0
})
// allowed_flags takes precedence over prohibited_flags.
& !allowed_flags;
if !readable && flags & OPEN_RIGHT_READABLE != 0 {
return Err(Status::ACCESS_DENIED);
}
if !writable && flags & OPEN_RIGHT_WRITABLE != 0 {
return Err(Status::ACCESS_DENIED);
}
if flags & prohibited_flags != 0 {
return Err(Status::INVALID_ARGS);
}
if flags & !allowed_flags != 0 {
return Err(Status::NOT_SUPPORTED);
}
Ok(flags)
}
| 35.108696 | 101 | 0.672446 |
79821077b9ef98f708318f5718e254fc1fa406b3 | 32,723 | use std::collections::HashMap;
use std::env;
use std::fs;
use std::io::{self, BufRead, BufReader, Read, Write};
use std::iter::Peekable;
use std::mem;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::str::Chars;
use std::thread;
use crate::config::{Color, Config, EmitMode, FileName, NewlineStyle, ReportTactic};
use crate::formatting::{ReportedErrors, SourceFile};
use crate::rustfmt_diff::{make_diff, print_diff, DiffLine, Mismatch, ModifiedChunk, OutputWriter};
use crate::source_file;
use crate::{is_nightly_channel, FormatReport, FormatReportFormatterBuilder, Input, Session};
use rustfmt_config_proc_macro::nightly_only_test;
mod configuration_snippet;
mod mod_resolver;
mod parser;
const DIFF_CONTEXT_SIZE: usize = 3;
// A list of files on which we want to skip testing.
const SKIP_FILE_WHITE_LIST: &[&str] = &[
// We want to make sure that the `skip_children` is correctly working,
// so we do not want to test this file directly.
"configs/skip_children/foo/mod.rs",
"issue-3434/no_entry.rs",
"issue-3665/sub_mod.rs",
// Testing for issue-3779
"issue-3779/ice.rs",
// These files and directory are a part of modules defined inside `cfg_if!`.
"cfg_if/mod.rs",
"cfg_if/detect",
"issue-3253/foo.rs",
"issue-3253/bar.rs",
"issue-3253/paths",
// These files and directory are a part of modules defined inside `cfg_attr(..)`.
"cfg_mod/dir",
"cfg_mod/bar.rs",
"cfg_mod/foo.rs",
"cfg_mod/wasm32.rs",
"skip/foo.rs",
];
fn init_log() {
let _ = env_logger::builder().is_test(true).try_init();
}
struct TestSetting {
/// The size of the stack of the thread that run tests.
stack_size: usize,
}
impl Default for TestSetting {
fn default() -> Self {
TestSetting {
stack_size: 8_388_608, // 8MB
}
}
}
fn run_test_with<F>(test_setting: &TestSetting, f: F)
where
F: FnOnce(),
F: Send + 'static,
{
thread::Builder::new()
.stack_size(test_setting.stack_size)
.spawn(f)
.expect("Failed to create a test thread")
.join()
.expect("Failed to join a test thread")
}
fn is_subpath<P>(path: &Path, subpath: &P) -> bool
where
P: AsRef<Path>,
{
(0..path.components().count())
.map(|i| {
path.components()
.skip(i)
.take(subpath.as_ref().components().count())
})
.any(|c| c.zip(subpath.as_ref().components()).all(|(a, b)| a == b))
}
fn is_file_skip(path: &Path) -> bool {
SKIP_FILE_WHITE_LIST
.iter()
.any(|file_path| is_subpath(path, file_path))
}
// Returns a `Vec` containing `PathBuf`s of files with an `rs` extension in the
// given path. The `recursive` argument controls if files from subdirectories
// are also returned. Files matching `is_file_skip` are excluded.
fn get_test_files(path: &Path, recursive: bool) -> Vec<PathBuf> {
    let mut files = vec![];
    if path.is_dir() {
        // Build the panic message lazily instead of allocating it on the
        // happy path (clippy: `expect_fun_call`).
        let entries = fs::read_dir(path).unwrap_or_else(|_| {
            panic!("couldn't read directory {}", path.to_str().unwrap())
        });
        for entry in entries {
            let entry = entry.expect("couldn't get `DirEntry`");
            let path = entry.path();
            if path.is_dir() && recursive {
                files.append(&mut get_test_files(&path, recursive));
            } else if path.extension().map_or(false, |f| f == "rs") && !is_file_skip(&path) {
                files.push(path);
            }
        }
    }
    files
}
// Asserts that every `.rs` file under `path` carries a significant comment
// of the form `// rustfmt-<config_name>` in its leading comment block.
fn verify_config_used(path: &Path, config_name: &str) {
    // Lazy panic message instead of an eager `format!` (clippy: `expect_fun_call`).
    let entries = fs::read_dir(path).unwrap_or_else(|_| {
        panic!("couldn't read {} directory", path.to_str().unwrap())
    });
    for entry in entries {
        let entry = entry.expect("couldn't get directory entry");
        let path = entry.path();
        if path.is_dir() {
            continue;
        }
        if path.extension().map_or(false, |f| f == "rs") {
            // check if "// rustfmt-<config_name>:" appears in the file.
            let filebuf = BufReader::new(
                fs::File::open(&path)
                    .unwrap_or_else(|_| panic!("couldn't read file {}", path.display())),
            );
            assert!(
                filebuf
                    .lines()
                    .map(Result::unwrap)
                    // Only the leading comment block is significant.
                    .take_while(|l| l.starts_with("//"))
                    .any(|l| l.starts_with(&format!("// rustfmt-{}", config_name))),
                "config option file {} does not contain expected config name",
                path.display()
            );
        }
    }
}
#[test]
fn verify_config_test_names() {
    init_log();
    // Each subdirectory of the config test trees is named after a config
    // option; every file inside must actually exercise that option.
    for path in &[
        Path::new("tests/source/configs"),
        Path::new("tests/target/configs"),
    ] {
        for entry in fs::read_dir(path).expect("couldn't read configs directory") {
            let entry = entry.expect("couldn't get directory entry");
            let path = entry.path();
            if path.is_dir() {
                let config_name = path.file_name().unwrap().to_str().unwrap();
                // Make sure that config name is used in the files in the directory.
                verify_config_used(&path, config_name);
            }
        }
    }
}
// This writes to the terminal using the same approach (via `term::stdout` or
// `println!`) that is used by `rustfmt::rustfmt_diff::print_diff`. Writing
// using only one or the other will cause the output order to differ when
// `print_diff` selects the approach not used.
fn write_message(msg: &str) {
    // `Color::Auto` lets `OutputWriter` pick the same backend `print_diff` uses.
    let mut writer = OutputWriter::new(Color::Auto);
    writer.writeln(msg, None);
}
// Integration tests. The files in `tests/source` are formatted and compared
// to their equivalent in `tests/target`. The target file and config can be
// overridden by annotations in the source file. The input and output must match
// exactly.
#[test]
fn system_tests() {
    init_log();
    run_test_with(&TestSetting::default(), || {
        // Get all files in the tests/source directory.
        let files = get_test_files(Path::new("tests/source"), true);
        let (_reports, count, fails) = check_files(files, &None);
        // Display results.
        println!("Ran {} system tests.", count);
        assert_eq!(fails, 0, "{} system tests failed", fails);
        // Guard against the test corpus silently disappearing (e.g. a bad glob).
        assert!(
            count >= 300,
            "Expected a minimum of {} system tests to be executed",
            300
        )
    });
}
// Do the same for tests/coverage-source directory.
// The only difference is the coverage mode.
#[test]
fn coverage_tests() {
    init_log();
    let files = get_test_files(Path::new("tests/coverage/source"), true);
    let (_reports, count, fails) = check_files(files, &None);
    println!("Ran {} tests in coverage mode.", count);
    assert_eq!(fails, 0, "{} tests failed", fails);
}
// Checks the Checkstyle XML emit mode against a golden file.
#[test]
fn checkstyle_test() {
    init_log();
    let filename = "tests/writemode/source/fn-single-line.rs";
    let expected_filename = "tests/writemode/target/checkstyle.xml";
    assert_output(Path::new(filename), Path::new(expected_filename));
}
// Checks the JSON emit mode against a golden file.
#[test]
fn json_test() {
    init_log();
    let filename = "tests/writemode/source/json.rs";
    let expected_filename = "tests/writemode/target/output.json";
    assert_output(Path::new(filename), Path::new(expected_filename));
}
// Formats a fixture with `EmitMode::ModifiedLines`, re-parses the emitted
// chunk stream, and checks it against the expected `ModifiedChunk`s.
#[test]
fn modified_test() {
    init_log();
    use std::io::BufRead;
    // Test "modified" output
    let filename = "tests/writemode/source/modified.rs";
    let mut data = Vec::new();
    let mut config = Config::default();
    config
        .set()
        .emit_mode(crate::config::EmitMode::ModifiedLines);
    {
        let mut session = Session::new(config, Some(&mut data));
        session.format(Input::File(filename.into())).unwrap();
    }
    let mut lines = data.lines();
    let mut chunks = Vec::new();
    while let Some(Ok(header)) = lines.next() {
        // Parse the header line: "<orig_line> <lines_removed> <lines_added>",
        // followed by exactly <lines_added> replacement lines.
        let values: Vec<_> = header
            .split(' ')
            .map(|s| s.parse::<u32>().unwrap())
            .collect();
        assert_eq!(values.len(), 3);
        let line_number_orig = values[0];
        let lines_removed = values[1];
        let num_added = values[2];
        let mut added_lines = Vec::new();
        for _ in 0..num_added {
            added_lines.push(lines.next().unwrap().unwrap());
        }
        chunks.push(ModifiedChunk {
            line_number_orig,
            lines_removed,
            lines: added_lines,
        });
    }
    assert_eq!(
        chunks,
        vec![
            ModifiedChunk {
                line_number_orig: 4,
                lines_removed: 4,
                lines: vec!["fn blah() {}".into()],
            },
            ModifiedChunk {
                line_number_orig: 9,
                lines_removed: 6,
                lines: vec!["#[cfg(a, b)]".into(), "fn main() {}".into()],
            },
        ],
    );
}
// Helper function for comparing the results of rustfmt
// to a known output file generated by one of the write modes.
fn assert_output(source: &Path, expected_filename: &Path) {
    let config = read_config(source);
    let (_, source_file, _) = format_file(source, config.clone());
    // Populate output by writing to a vec.
    let mut out = vec![];
    let _ = source_file::write_all_files(&source_file, &mut out, &config);
    let output = String::from_utf8(out).unwrap();
    let mut expected_file = fs::File::open(&expected_filename).expect("couldn't open target");
    let mut expected_text = String::new();
    expected_file
        .read_to_string(&mut expected_text)
        .expect("Failed reading target");
    // On mismatch, print a diff (for debugging) before failing the test.
    let compare = make_diff(&expected_text, &output, DIFF_CONTEXT_SIZE);
    if !compare.is_empty() {
        let mut failures = HashMap::new();
        failures.insert(source.to_owned(), compare);
        print_mismatches_default_message(failures);
        panic!("Text does not match expected output");
    }
}
// Helper function for comparing the results of rustfmt
// to a known output generated by one of the write modes.
// `has_diff` states whether the session is expected to report differences.
fn assert_stdin_output(
    source: &Path,
    expected_filename: &Path,
    emit_mode: EmitMode,
    has_diff: bool,
) {
    let mut config = Config::default();
    // Force Unix newlines so golden files compare equal on every platform.
    config.set().newline_style(NewlineStyle::Unix);
    config.set().emit_mode(emit_mode);
    let mut source_file = fs::File::open(&source).expect("couldn't open source");
    let mut source_text = String::new();
    source_file
        .read_to_string(&mut source_text)
        .expect("Failed reading target");
    let input = Input::Text(source_text);
    // Populate output by writing to a vec.
    let mut buf: Vec<u8> = vec![];
    {
        let mut session = Session::new(config, Some(&mut buf));
        session.format(input).unwrap();
        let errors = ReportedErrors {
            // Field-init shorthand (was `has_diff: has_diff`;
            // clippy: `redundant_field_names`).
            has_diff,
            ..Default::default()
        };
        assert_eq!(session.errors, errors);
    }
    let mut expected_file = fs::File::open(&expected_filename).expect("couldn't open target");
    let mut expected_text = String::new();
    expected_file
        .read_to_string(&mut expected_text)
        .expect("Failed reading target");
    let output = String::from_utf8(buf).unwrap();
    let compare = make_diff(&expected_text, &output, DIFF_CONTEXT_SIZE);
    if !compare.is_empty() {
        let mut failures = HashMap::new();
        failures.insert(source.to_owned(), compare);
        print_mismatches_default_message(failures);
        panic!("Text does not match expected output");
    }
}
// Idempotence tests. Files in tests/target are checked to be unaltered by
// rustfmt.
#[nightly_only_test]
#[test]
fn idempotence_tests() {
    init_log();
    run_test_with(&TestSetting::default(), || {
        // Get all files in the tests/target directory.
        let files = get_test_files(Path::new("tests/target"), true);
        let (_reports, count, fails) = check_files(files, &None);
        // Display results.
        println!("Ran {} idempotent tests.", count);
        assert_eq!(fails, 0, "{} idempotent tests failed", fails);
        // Guard against the test corpus silently disappearing.
        assert!(
            count >= 400,
            "Expected a minimum of {} idempotent tests to be executed",
            400
        )
    });
}
// Runs `cargo-fmt --check --all` (built next to this test binary) over the
// project itself, ensuring rustfmt's own sources are formatted.
#[nightly_only_test]
#[test]
fn self_tests() {
    // Sibling executables live in the parent of the test binary's directory.
    let get_exe_path = |name| {
        let mut path = env::current_exe().unwrap();
        path.pop();
        path.set_file_name(format!("{name}{}", env::consts::EXE_SUFFIX));
        path
    };
    let status = Command::new(get_exe_path("cargo-fmt"))
        .args(["--check", "--all"])
        .env("RUSTFMT", get_exe_path("rustfmt"))
        .status()
        .unwrap();
    assert!(status.success());
}
#[test]
fn format_files_find_new_files_via_cfg_if() {
    init_log();
    run_test_with(&TestSetting::default(), || {
        // To repro issue-4656, it is necessary that these files are parsed
        // as a part of the same session (hence this separate test runner).
        let files = vec![
            Path::new("tests/source/issue-4656/lib2.rs"),
            Path::new("tests/source/issue-4656/lib.rs"),
        ];
        let config = Config::default();
        let mut session = Session::<io::Stdout>::new(config, None);
        let mut write_result = HashMap::new();
        for file in files {
            assert!(file.exists());
            let result = session.format(Input::File(file.into())).unwrap();
            assert!(!session.has_formatting_errors());
            assert!(!result.has_warnings());
            // Swap the session's accumulated files out so each input file's
            // results are collected separately.
            let mut source_file = SourceFile::new();
            mem::swap(&mut session.source_file, &mut source_file);
            for (filename, text) in source_file {
                if let FileName::Real(ref filename) = filename {
                    write_result.insert(filename.to_owned(), text);
                }
            }
        }
        // 2 inputs + 1 module discovered through `cfg_if!` = 3 outputs.
        assert_eq!(
            3,
            write_result.len(),
            "Should have uncovered an extra file (format_me_please.rs) via lib.rs"
        );
        assert!(handle_result(write_result, None).is_ok());
    });
}
// Formats a one-liner supplied as text and checks the exact emitted bytes
// (newline convention differs between Windows and other platforms).
#[test]
fn stdin_formatting_smoke_test() {
    init_log();
    let input = Input::Text("fn main () {}".to_owned());
    let mut config = Config::default();
    config.set().emit_mode(EmitMode::Stdout);
    let mut buf: Vec<u8> = vec![];
    {
        let mut session = Session::new(config, Some(&mut buf));
        session.format(input).unwrap();
        assert!(session.has_no_errors());
    }
    #[cfg(not(windows))]
    assert_eq!(buf, "<stdin>:\n\nfn main() {}\n".as_bytes());
    #[cfg(windows)]
    assert_eq!(buf, "<stdin>:\n\nfn main() {}\r\n".as_bytes());
}
// Unbalanced braces must surface as parsing errors, not a process panic.
#[test]
fn stdin_parser_panic_caught() {
    init_log();
    // See issue #3239.
    for text in ["{", "}"].iter().cloned().map(String::from) {
        let mut buf = vec![];
        let mut session = Session::new(Default::default(), Some(&mut buf));
        let _ = session.format(Input::Text(text));
        assert!(session.has_parsing_errors());
    }
}
/// Ensures that `EmitMode::ModifiedLines` works with input from `stdin`. Useful
/// when embedding Rustfmt (e.g. inside RLS).
#[test]
fn stdin_works_with_modified_lines() {
    init_log();
    let input = "\nfn\n some( )\n{\n}\nfn main () {}\n";
    // Expected chunk stream: header "1 6 2" (from line 1, 6 removed, 2 added)
    // followed by the two replacement lines.
    let output = "1 6 2\nfn some() {}\nfn main() {}\n";
    let input = Input::Text(input.to_owned());
    let mut config = Config::default();
    config.set().newline_style(NewlineStyle::Unix);
    config.set().emit_mode(EmitMode::ModifiedLines);
    let mut buf: Vec<u8> = vec![];
    {
        let mut session = Session::new(config, Some(&mut buf));
        session.format(input).unwrap();
        let errors = ReportedErrors {
            has_diff: true,
            ..Default::default()
        };
        assert_eq!(session.errors, errors);
    }
    assert_eq!(buf, output.as_bytes());
}
/// Ensures that `EmitMode::Json` works with input from `stdin`.
#[test]
fn stdin_works_with_json() {
    init_log();
    assert_stdin_output(
        Path::new("tests/writemode/source/stdin.rs"),
        Path::new("tests/writemode/target/stdin.json"),
        EmitMode::Json,
        true,
    );
}
/// Ensures that `EmitMode::Checkstyle` works with input from `stdin`.
#[test]
fn stdin_works_with_checkstyle() {
    init_log();
    assert_stdin_output(
        Path::new("tests/writemode/source/stdin.rs"),
        Path::new("tests/writemode/target/stdin.xml"),
        EmitMode::Checkstyle,
        // Checkstyle output is not treated as a diff by the session.
        false,
    );
}
// With `disable_all_formatting`, the rustfmt binary must echo stdin verbatim
// and write nothing to stderr.
#[test]
fn stdin_disable_all_formatting_test() {
    init_log();
    let input = String::from("fn main() { println!(\"This should not be formatted.\"); }");
    let mut child = Command::new(rustfmt().to_str().unwrap())
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .arg("--config-path=./tests/config/disable_all_formatting.toml")
        .spawn()
        .expect("failed to execute child");
    {
        // Scoped so stdin is dropped (closed) before waiting on the child.
        let stdin = child.stdin.as_mut().expect("failed to get stdin");
        stdin
            .write_all(input.as_bytes())
            .expect("failed to write stdin");
    }
    let output = child.wait_with_output().expect("failed to wait on child");
    assert!(output.status.success());
    assert!(output.stderr.is_empty());
    assert_eq!(input, String::from_utf8(output.stdout).unwrap());
}
// `format_generated_files(false)` should leave a `@generated` file alone —
// but stdin input is currently exempt, so the file is still formatted.
#[test]
fn stdin_generated_files_issue_5172() {
    init_log();
    let input = Input::Text("//@generated\nfn main() {}".to_owned());
    let mut config = Config::default();
    config.set().emit_mode(EmitMode::Stdout);
    config.set().format_generated_files(false);
    config.set().newline_style(NewlineStyle::Unix);
    let mut buf: Vec<u8> = vec![];
    {
        let mut session = Session::new(config, Some(&mut buf));
        session.format(input).unwrap();
        assert!(session.has_no_errors());
    }
    // N.B. this should be changed once `format_generated_files` is supported with stdin
    assert_eq!(
        String::from_utf8(buf).unwrap(),
        "<stdin>:\n\n//@generated\nfn main() {}\n",
    );
}
// A line that can't be shrunk below `max_width` must be reported as a
// formatting error when `error_on_line_overflow` is set.
#[test]
fn format_lines_errors_are_reported() {
    init_log();
    // 239 'a's + "fn () {}" overflows the default max width.
    let long_identifier = String::from_utf8(vec![b'a'; 239]).unwrap();
    let input = Input::Text(format!("fn {}() {{}}", long_identifier));
    let mut config = Config::default();
    config.set().error_on_line_overflow(true);
    let mut session = Session::<io::Stdout>::new(config, None);
    session.format(input).unwrap();
    assert!(session.has_formatting_errors());
}
// Same check with hard tabs, where each tab counts as multiple columns.
#[test]
fn format_lines_errors_are_reported_with_tabs() {
    init_log();
    let long_identifier = String::from_utf8(vec![b'a'; 97]).unwrap();
    let input = Input::Text(format!("fn a() {{\n\t{}\n}}", long_identifier));
    let mut config = Config::default();
    config.set().error_on_line_overflow(true);
    config.set().hard_tabs(true);
    let mut session = Session::<io::Stdout>::new(config, None);
    session.format(input).unwrap();
    assert!(session.has_formatting_errors());
}
// For each file, run rustfmt and collect the output.
// Returns the number of files checked and the number of failures.
fn check_files(files: Vec<PathBuf>, opt_config: &Option<PathBuf>) -> (Vec<FormatReport>, u32, u32) {
    let mut count = 0;
    let mut fails = 0;
    let mut reports = vec![];
    for file_name in files {
        let sig_comments = read_significant_comments(&file_name);
        // `// rustfmt-unstable` files only run on the nightly channel.
        if sig_comments.contains_key("unstable") && !is_nightly_channel!() {
            debug!(
                "Skipping '{}' because it requires unstable \
                 features which are only available on nightly...",
                file_name.display()
            );
            continue;
        }
        debug!("Testing '{}'...", file_name.display());
        match idempotent_check(&file_name, opt_config) {
            // A report with warnings counts as a failure even though the
            // check itself succeeded.
            Ok(ref report) if report.has_warnings() => {
                print!("{}", FormatReportFormatterBuilder::new(report).build());
                fails += 1;
            }
            Ok(report) => reports.push(report),
            Err(err) => {
                if let IdempotentCheckError::Mismatch(msg) = err {
                    print_mismatches_default_message(msg);
                }
                fails += 1;
            }
        }
        count += 1;
    }
    (reports, count, fails)
}
// Prints per-file diffs with a default "Mismatch at file:line:" header.
fn print_mismatches_default_message(result: HashMap<PathBuf, Vec<Mismatch>>) {
    for (file_name, diff) in result {
        let mismatch_msg_formatter =
            |line_num| format!("\nMismatch at {}:{}:", file_name.display(), line_num);
        print_diff(diff, &mismatch_msg_formatter, &Default::default());
    }
    // Reset any terminal colouring left over from the diff output.
    if let Some(mut t) = term::stdout() {
        t.reset().unwrap_or(());
    }
}
// Same as above, but with a caller-supplied header formatter.
fn print_mismatches<T: Fn(u32) -> String>(
    result: HashMap<PathBuf, Vec<Mismatch>>,
    mismatch_msg_formatter: T,
) {
    for (_file_name, diff) in result {
        print_diff(diff, &mismatch_msg_formatter, &Default::default());
    }
    if let Some(mut t) = term::stdout() {
        t.reset().unwrap_or(());
    }
}
// Builds the `Config` for a test file from its significant comments
// (`// rustfmt-key: value`), falling back to a sibling `.toml` config file.
fn read_config(filename: &Path) -> Config {
    let sig_comments = read_significant_comments(filename);
    // Look for a config file. If there is a 'config' property in the significant comments, use
    // that. Otherwise, if there are no significant comments at all, look for a config file with
    // the same name as the test file.
    let mut config = if !sig_comments.is_empty() {
        get_config(sig_comments.get("config").map(Path::new))
    } else {
        get_config(filename.with_extension("toml").file_name().map(Path::new))
    };
    for (key, val) in &sig_comments {
        // "target", "config" and "unstable" are harness directives,
        // not rustfmt options.
        if key != "target" && key != "config" && key != "unstable" {
            config.override_value(key, val);
            if config.is_default(key) {
                warn!("Default value {} used explicitly for {}", val, key);
            }
        }
    }
    // Don't generate warnings for to-do items.
    config.set().report_todo(ReportTactic::Never);
    config
}
// Formats one file in a fresh session; returns whether parsing failed,
// the formatted sources, and the session's report.
fn format_file<P: Into<PathBuf>>(filepath: P, config: Config) -> (bool, SourceFile, FormatReport) {
    let filepath = filepath.into();
    let input = Input::File(filepath);
    let mut session = Session::<io::Stdout>::new(config, None);
    let result = session.format(input).unwrap();
    let parsing_errors = session.has_parsing_errors();
    // Take ownership of the formatted sources out of the session.
    let mut source_file = SourceFile::new();
    mem::swap(&mut session.source_file, &mut source_file);
    (parsing_errors, source_file, result)
}
// Failure modes of `idempotent_check`: either the formatted output differed
// from the target file(s), or the input failed to parse at all.
enum IdempotentCheckError {
    Mismatch(HashMap<PathBuf, Vec<Mismatch>>),
    Parse,
}
// Formats `filename` and compares the result against its target file(s);
// `opt_config` overrides the per-file config lookup when supplied.
fn idempotent_check(
    filename: &PathBuf,
    opt_config: &Option<PathBuf>,
) -> Result<FormatReport, IdempotentCheckError> {
    let sig_comments = read_significant_comments(filename);
    let config = if let Some(ref config_file_path) = opt_config {
        Config::from_toml_path(Some(config_file_path), None).expect("`rustfmt.toml` not found")
    } else {
        read_config(filename)
    };
    let (parsing_errors, source_file, format_report) = format_file(filename, config);
    if parsing_errors {
        return Err(IdempotentCheckError::Parse);
    }
    let mut write_result = HashMap::new();
    for (filename, text) in source_file {
        if let FileName::Real(ref filename) = filename {
            write_result.insert(filename.to_owned(), text);
        }
    }
    // `// rustfmt-target: <name>` redirects the comparison to a different file.
    let target = sig_comments.get("target").map(|x| &(*x)[..]);
    handle_result(write_result, target).map(|_| format_report)
}
// Reads test config file using the supplied (optional) file name. If there's no file name or the
// file doesn't exist, just return the default config. Otherwise, the file must be read
// successfully.
fn get_config(config_file: Option<&Path>) -> Config {
    let config_file_name = match config_file {
        None => return Default::default(),
        Some(file_name) => {
            // Config files are resolved relative to tests/config/.
            let mut full_path = PathBuf::from("tests/config/");
            full_path.push(file_name);
            if !full_path.exists() {
                return Default::default();
            };
            full_path
        }
    };
    let mut def_config_file = fs::File::open(config_file_name).expect("couldn't open config");
    let mut def_config = String::new();
    def_config_file
        .read_to_string(&mut def_config)
        .expect("Couldn't read config");
    Config::from_toml(Some(&def_config), Some(Path::new("tests/config/")), None)
        .expect("invalid TOML")
}
// Reads significant comments of the form: `// rustfmt-key: value` into a hash map.
fn read_significant_comments(file_name: &Path) -> HashMap<String, String> {
    let file = fs::File::open(file_name)
        .unwrap_or_else(|_| panic!("couldn't read file {}", file_name.display()));
    let reader = BufReader::new(file);
    // Capture group 1 = key, group 2 = value.
    let pattern = r"^\s*//\s*rustfmt-([^:]+):\s*(\S+)";
    let regex = regex::Regex::new(pattern).expect("failed creating pattern 1");
    // Matches lines containing significant comments or whitespace.
    let line_regex = regex::Regex::new(r"(^\s*$)|(^\s*//\s*rustfmt-[^:]+:\s*\S+)")
        .expect("failed creating pattern 2");
    reader
        .lines()
        .map(|line| line.expect("failed getting line"))
        // Stop scanning once a non-blank, non-significant line appears.
        .filter(|line| line_regex.is_match(line))
        .filter_map(|line| {
            regex.captures_iter(&line).next().map(|capture| {
                (
                    capture
                        .get(1)
                        .expect("couldn't unwrap capture")
                        .as_str()
                        .to_owned(),
                    capture
                        .get(2)
                        .expect("couldn't unwrap capture")
                        .as_str()
                        .to_owned(),
                )
            })
        })
        .collect()
}
// Compares formatted output against the corresponding target files,
// returning `Mismatch` diffs for any file whose contents differ
// (CRLF/LF differences are ignored).
// TODO: needs a better name, more explanation.
fn handle_result(
    result: HashMap<PathBuf, String>,
    target: Option<&str>,
) -> Result<(), IdempotentCheckError> {
    let mut failures = HashMap::new();
    for (file_name, fmt_text) in result {
        // If file is in tests/source, compare to file with same name in tests/target.
        let target = get_target(&file_name, target);
        // Build panic messages lazily instead of allocating two Strings per
        // iteration on the happy path (clippy: `expect_fun_call`).
        let mut f = fs::File::open(&target)
            .unwrap_or_else(|_| panic!("couldn't open target {:?}", target));
        let mut text = String::new();
        f.read_to_string(&mut text)
            .unwrap_or_else(|_| panic!("failed reading target {:?}", target));
        // Ignore LF and CRLF difference for Windows.
        if !string_eq_ignore_newline_repr(&fmt_text, &text) {
            let diff = make_diff(&text, &fmt_text, DIFF_CONTEXT_SIZE);
            assert!(
                !diff.is_empty(),
                "Empty diff? Maybe due to a missing a newline at the end of a file?"
            );
            failures.insert(file_name, diff);
        }
    }
    if failures.is_empty() {
        Ok(())
    } else {
        Err(IdempotentCheckError::Mismatch(failures))
    }
}
// Maps source file paths to their target paths: the first `source` path
// component is swapped for `target`, and an explicit `target` override
// replaces the file name. Paths without a `source` component are returned
// unchanged (an idempotence check or a self check).
fn get_target(file_name: &Path, target: Option<&str>) -> PathBuf {
    let source_pos = file_name
        .components()
        .position(|c| c.as_os_str() == "source");
    match source_pos {
        Some(n) => {
            // Rebuild the path, substituting "target" at position `n`.
            let mut mapped = PathBuf::new();
            for (i, component) in file_name.components().enumerate() {
                mapped.push(if i == n {
                    std::ffi::OsStr::new("target")
                } else {
                    component.as_os_str()
                });
            }
            match target {
                Some(replace_name) => mapped.with_file_name(replace_name),
                None => mapped,
            }
        }
        None => file_name.to_owned(),
    }
}
// Sanity checks for the in-crate diff engine used by the harness above.
#[test]
fn rustfmt_diff_make_diff_tests() {
    init_log();
    let diff = make_diff("a\nb\nc\nd", "a\ne\nc\nd", 3);
    assert_eq!(
        diff,
        vec![Mismatch {
            line_number: 1,
            line_number_orig: 1,
            lines: vec![
                DiffLine::Context("a".into()),
                DiffLine::Resulting("b".into()),
                DiffLine::Expected("e".into()),
                DiffLine::Context("c".into()),
                DiffLine::Context("d".into()),
            ],
        }]
    );
}
// Identical inputs must produce no mismatches.
#[test]
fn rustfmt_diff_no_diff_test() {
    init_log();
    let diff = make_diff("a\nb\nc\nd", "a\nb\nc\nd", 3);
    assert_eq!(diff, vec![]);
}
/// Compares two strings treating a CRLF (`"\r\n"`) pair as equivalent to a
/// bare LF. A lone `'\r'` is still significant.
fn string_eq_ignore_newline_repr(left: &str, right: &str) -> bool {
    let lhs = CharsIgnoreNewlineRepr(left.chars().peekable());
    let rhs = CharsIgnoreNewlineRepr(right.chars().peekable());
    lhs.eq(rhs)
}
/// Char iterator that yields `'\n'` for every `"\r\n"` sequence and passes
/// all other characters (including a lone `'\r'`) through unchanged.
struct CharsIgnoreNewlineRepr<'a>(Peekable<Chars<'a>>);
impl<'a> Iterator for CharsIgnoreNewlineRepr<'a> {
    type Item = char;
    fn next(&mut self) -> Option<char> {
        let c = self.0.next()?;
        if c != '\r' {
            return Some(c);
        }
        // Collapse "\r\n" into a single '\n'; keep a lone '\r' as-is.
        match self.0.peek() {
            Some('\n') => {
                self.0.next();
                Some('\n')
            }
            _ => Some('\r'),
        }
    }
}
// Covers empty inputs, CR-vs-CRLF mixes, and unequal lengths.
#[test]
fn string_eq_ignore_newline_repr_test() {
    init_log();
    assert!(string_eq_ignore_newline_repr("", ""));
    assert!(!string_eq_ignore_newline_repr("", "abc"));
    assert!(!string_eq_ignore_newline_repr("abc", ""));
    assert!(string_eq_ignore_newline_repr("a\nb\nc\rd", "a\nb\r\nc\rd"));
    assert!(string_eq_ignore_newline_repr("a\r\n\r\n\r\nb", "a\n\n\nb"));
    assert!(!string_eq_ignore_newline_repr("a\r\nbcd", "a\nbcdefghijk"));
}
/// RAII guard for a throwaway file; the file is removed when dropped.
struct TempFile {
    path: PathBuf,
}
/// Creates `file_name` (under `RUSTFMT_TEST_DIR` if set, otherwise the
/// current directory) containing a trivial `main`, returning a guard that
/// deletes it on drop.
fn make_temp_file(file_name: &'static str) -> TempFile {
    use std::env::var;
    use std::fs::File;
    // Used in the Rust build system.
    let target_dir = var("RUSTFMT_TEST_DIR").unwrap_or_else(|_| ".".to_owned());
    let path = Path::new(&target_dir).join(file_name);
    let mut file = File::create(&path).expect("couldn't create temp file");
    file.write_all(b"fn main() {}\n")
        .expect("couldn't write temp file");
    TempFile { path }
}
impl Drop for TempFile {
    fn drop(&mut self) {
        // A failed cleanup is a hard error so stale files don't silently
        // pollute later test runs.
        std::fs::remove_file(&self.path).expect("couldn't delete temp file");
    }
}
// Locates the `rustfmt` binary built alongside this test executable.
// NOTE(review): `cfg!(release)` tests for a custom `--cfg release` flag, not
// the Cargo release profile (`debug_assertions` would detect that) — confirm
// this is intentional for the build system that sets it.
fn rustfmt() -> PathBuf {
    let mut me = env::current_exe().expect("failed to get current executable");
    // Chop of the test name.
    me.pop();
    // Chop off `deps`.
    me.pop();
    // If we run `cargo test --release`, we might only have a release build.
    if cfg!(release) {
        // `../release/`
        me.pop();
        me.push("release");
    }
    me.push("rustfmt");
    assert!(
        me.is_file() || me.with_extension("exe").is_file(),
        "{}",
        if cfg!(release) {
            "no rustfmt bin, try running `cargo build --release` before testing"
        } else {
            "no rustfmt bin, try running `cargo build` before testing"
        }
    );
    me
}
// Smoke test: `rustfmt --check <file>` runs to completion on a fresh file.
#[test]
fn verify_check_works() {
    init_log();
    let temp_file = make_temp_file("temp_check.rs");
    Command::new(rustfmt().to_str().unwrap())
        .arg("--check")
        .arg(temp_file.path.to_str().unwrap())
        .status()
        .expect("run with check option failed");
}
// `rustfmt --check` fed already-formatted stdin must exit successfully.
#[test]
fn verify_check_works_with_stdin() {
    init_log();
    let mut child = Command::new(rustfmt().to_str().unwrap())
        .arg("--check")
        .stdin(Stdio::piped())
        .stderr(Stdio::piped())
        .spawn()
        .expect("run with check option failed");
    {
        // Scoped so stdin is closed before waiting on the child.
        let stdin = child.stdin.as_mut().expect("Failed to open stdin");
        stdin
            .write_all("fn main() {}\n".as_bytes())
            .expect("Failed to write to rustfmt --check");
    }
    let output = child
        .wait_with_output()
        .expect("Failed to wait on rustfmt child");
    assert!(output.status.success());
}
// `rustfmt --check -l` on stdin must list "<stdin>" when changes are needed.
#[test]
fn verify_check_l_works_with_stdin() {
    init_log();
    let mut child = Command::new(rustfmt().to_str().unwrap())
        .arg("--check")
        .arg("-l")
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .spawn()
        .expect("run with check option failed");
    {
        let stdin = child.stdin.as_mut().expect("Failed to open stdin");
        stdin
            .write_all("fn main()\n{}\n".as_bytes())
            .expect("Failed to write to rustfmt --check");
    }
    let output = child
        .wait_with_output()
        .expect("Failed to wait on rustfmt child");
    assert!(output.status.success());
    assert_eq!(std::str::from_utf8(&output.stdout).unwrap(), "<stdin>\n");
}
| 32.144401 | 100 | 0.589585 |
fc4e8a81c663d6f20cd085f772e54b279933c13b | 900 | mod helper;
use helper::{
TestEnum,
TestEnum::{Int, OtherUnit, Tuple, Unit},
};
// Exercises the generated `and_int` combinator across the four
// matching/non-matching variant combinations.
#[test]
fn single_value_tuple() {
    // True True
    assert_eq!(Int(123).and_int(Int(456)).unwrap_int(), 456);
    // True False
    assert_eq!(Int(123).and_int(Unit).unwrap_int(), 123);
    // False True
    assert!(Unit.and_int(Int(123)).is_unit());
    // False False
    assert!(Unit.and_int(OtherUnit).is_unit());
}
// Builds a `Tuple` variant whose string field mirrors its numeric field.
fn tuple(num: u128) -> TestEnum {
    Tuple(num.to_string(), num)
}
// Same combination matrix for the multi-field `Tuple` variant.
#[test]
fn multi_value_tuple() {
    // True True
    assert_eq!(
        tuple(123).and_tuple(tuple(456)).unwrap_tuple(),
        ("456".into(), 456)
    );
    // True False
    assert_eq!(
        tuple(123).and_tuple(Unit).unwrap_tuple(),
        ("123".into(), 123)
    );
    // False True
    assert!(Unit.and_tuple(tuple(123)).is_unit());
    // False False
    assert!(Unit.and_tuple(Unit).is_unit());
}
| 20 | 61 | 0.59 |
f7ca775991385e627a6abc900f8a4a419b2804de | 142 | pub mod source;
pub mod util;
pub mod alsa;
pub mod modules;
pub mod freeverb;
pub mod karplus_strong;
/// Global audio sample rate, in Hz (48 kHz).
pub const SAMPLE_RATE: f64 = 48000.0;
| 15.777778 | 37 | 0.753521 |
fcb2da60ea93cd3b7a374b2494da6d7a6575f08d | 7,290 | use crate::workdir::Workdir;
// This macro takes *two* identifiers: one for the test with headers
// and another for the test without headers.
// Each invocation generates a module containing both variants; `$fun`
// receives the prepared `Workdir`, the join `Command`, and a `headers` flag.
macro_rules! join_test {
    ($name:ident, $fun:expr) => {
        mod $name {
            use std::process;
            use super::{make_rows, setup};
            use crate::workdir::Workdir;
            #[test]
            fn headers() {
                let wrk = setup(stringify!($name), true);
                let mut cmd = wrk.command("join");
                cmd.args(&["city", "cities.csv", "city", "places.csv"]);
                $fun(wrk, cmd, true);
            }
            #[test]
            fn no_headers() {
                let n = stringify!(concat_idents!($name, _no_headers));
                let wrk = setup(n, false);
                let mut cmd = wrk.command("join");
                cmd.arg("--no-headers");
                // Without headers the join columns are addressed by index.
                cmd.args(&["1", "cities.csv", "1", "places.csv"]);
                $fun(wrk, cmd, false);
            }
        }
    };
}
/// Builds a working directory called `name` containing the `cities.csv` and
/// `places.csv` fixtures; when `headers` is set, a header row is prepended
/// to each file.
fn setup(name: &str, headers: bool) -> Workdir {
    let mut cities = vec![
        svec!["Boston", "MA"],
        svec!["New York", "NY"],
        svec!["San Francisco", "CA"],
        svec!["Buffalo", "NY"],
    ];
    let mut places = vec![
        svec!["Boston", "Logan Airport"],
        svec!["Boston", "Boston Garden"],
        svec!["Buffalo", "Ralph Wilson Stadium"],
        svec!["Orlando", "Disney World"],
    ];
    if headers {
        cities.insert(0, svec!["city", "state"]);
        places.insert(0, svec!["city", "place"]);
    }
    let wrk = Workdir::new(name);
    wrk.create("cities.csv", cities);
    wrk.create("places.csv", places);
    wrk
}
/// Prepends the expected header row to `rows` when `headers` is set.
/// `left_only` selects the two-column header used by semi/anti joins;
/// otherwise the four-column joined header is used.
fn make_rows(headers: bool, left_only: bool, rows: Vec<Vec<String>>) -> Vec<Vec<String>> {
    let mut all_rows = Vec::new();
    if headers {
        let header: &[&str] = if left_only {
            &["city", "state"]
        } else {
            &["city", "state", "city", "place"]
        };
        all_rows.push(header.iter().map(|s| s.to_string()).collect());
    }
    all_rows.extend(rows);
    all_rows
}
// Inner join: only rows with matching keys on both sides appear.
join_test!(join_inner, |wrk: Workdir,
                        mut cmd: process::Command,
                        headers: bool| {
    let got: Vec<Vec<String>> = wrk.read_stdout(&mut cmd);
    let expected = make_rows(
        headers,
        false,
        vec![
            svec!["Boston", "MA", "Boston", "Logan Airport"],
            svec!["Boston", "MA", "Boston", "Boston Garden"],
            svec!["Buffalo", "NY", "Buffalo", "Ralph Wilson Stadium"],
        ],
    );
    assert_eq!(got, expected);
});
// Left outer join: unmatched left rows are kept with empty right columns.
join_test!(
    join_outer_left,
    |wrk: Workdir, mut cmd: process::Command, headers: bool| {
        cmd.arg("--left");
        let got: Vec<Vec<String>> = wrk.read_stdout(&mut cmd);
        let expected = make_rows(
            headers,
            false,
            vec![
                svec!["Boston", "MA", "Boston", "Logan Airport"],
                svec!["Boston", "MA", "Boston", "Boston Garden"],
                svec!["New York", "NY", "", ""],
                svec!["San Francisco", "CA", "", ""],
                svec!["Buffalo", "NY", "Buffalo", "Ralph Wilson Stadium"],
            ],
        );
        assert_eq!(got, expected);
    }
);
// Right outer join: unmatched right rows are kept with empty left columns.
join_test!(
    join_outer_right,
    |wrk: Workdir, mut cmd: process::Command, headers: bool| {
        cmd.arg("--right");
        let got: Vec<Vec<String>> = wrk.read_stdout(&mut cmd);
        let expected = make_rows(
            headers,
            false,
            vec![
                svec!["Boston", "MA", "Boston", "Logan Airport"],
                svec!["Boston", "MA", "Boston", "Boston Garden"],
                svec!["Buffalo", "NY", "Buffalo", "Ralph Wilson Stadium"],
                svec!["", "", "Orlando", "Disney World"],
            ],
        );
        assert_eq!(got, expected);
    }
);
// Full outer join: unmatched rows from both sides are kept.
join_test!(
    join_outer_full,
    |wrk: Workdir, mut cmd: process::Command, headers: bool| {
        cmd.arg("--full");
        let got: Vec<Vec<String>> = wrk.read_stdout(&mut cmd);
        let expected = make_rows(
            headers,
            false,
            vec![
                svec!["Boston", "MA", "Boston", "Logan Airport"],
                svec!["Boston", "MA", "Boston", "Boston Garden"],
                svec!["New York", "NY", "", ""],
                svec!["San Francisco", "CA", "", ""],
                svec!["Buffalo", "NY", "Buffalo", "Ralph Wilson Stadium"],
                svec!["", "", "Orlando", "Disney World"],
            ],
        );
        assert_eq!(got, expected);
    }
);
// Left semi join: left rows with a match, emitted once, left columns only.
join_test!(join_left_semi, |wrk: Workdir,
                            mut cmd: process::Command,
                            headers: bool| {
    cmd.arg("--left-semi");
    let got: Vec<Vec<String>> = wrk.read_stdout(&mut cmd);
    let expected = make_rows(headers, true, vec![svec!["Buffalo", "NY"]]);
    assert_eq!(got, expected);
});
// Left anti join: left rows without any match, left columns only.
join_test!(join_left_anti, |wrk: Workdir,
                            mut cmd: process::Command,
                            headers: bool| {
    cmd.arg("--left-anti");
    let got: Vec<Vec<String>> = wrk.read_stdout(&mut cmd);
    let expected = make_rows(
        headers,
        true,
        vec![svec!["New York", "NY"], svec!["San Francisco", "CA"]],
    );
    assert_eq!(got, expected);
});
// Regression test: multi-column joins with re-ordered key columns (issue 11).
#[test]
fn join_inner_issue11() {
    let a = vec![svec!["1", "2"], svec!["3", "4"], svec!["5", "6"]];
    let b = vec![svec!["2", "1"], svec!["4", "3"], svec!["6", "5"]];
    let wrk = Workdir::new("join_inner_issue11");
    wrk.create("a.csv", a);
    wrk.create("b.csv", b);
    let mut cmd = wrk.command("join");
    // Join a's (col1, col2) against b's (col2, col1).
    cmd.args(&["1,2", "a.csv", "2,1", "b.csv"]);
    let got: Vec<Vec<String>> = wrk.read_stdout(&mut cmd);
    let expected = vec![
        svec!["1", "2", "2", "1"],
        svec!["3", "4", "4", "3"],
        svec!["5", "6", "6", "5"],
    ];
    assert_eq!(got, expected);
}
// Cross join (cartesian product) with header rows.
#[test]
fn join_cross() {
    let wrk = Workdir::new("join_cross");
    wrk.create(
        "letters.csv",
        vec![svec!["h1", "h2"], svec!["a", "b"], svec!["c", "d"]],
    );
    wrk.create(
        "numbers.csv",
        vec![svec!["h3", "h4"], svec!["1", "2"], svec!["3", "4"]],
    );
    let mut cmd = wrk.command("join");
    // Cross joins take no key columns, hence the empty column arguments.
    cmd.arg("--cross")
        .args(&["", "letters.csv", "", "numbers.csv"]);
    let got: Vec<Vec<String>> = wrk.read_stdout(&mut cmd);
    let expected = vec![
        svec!["h1", "h2", "h3", "h4"],
        svec!["a", "b", "1", "2"],
        svec!["a", "b", "3", "4"],
        svec!["c", "d", "1", "2"],
        svec!["c", "d", "3", "4"],
    ];
    assert_eq!(got, expected);
}
// Cross join without header rows.
#[test]
fn join_cross_no_headers() {
    let wrk = Workdir::new("join_cross_no_headers");
    wrk.create("letters.csv", vec![svec!["a", "b"], svec!["c", "d"]]);
    wrk.create("numbers.csv", vec![svec!["1", "2"], svec!["3", "4"]]);
    let mut cmd = wrk.command("join");
    cmd.arg("--cross")
        .arg("--no-headers")
        .args(&["", "letters.csv", "", "numbers.csv"]);
    let got: Vec<Vec<String>> = wrk.read_stdout(&mut cmd);
    let expected = vec![
        svec!["a", "b", "1", "2"],
        svec!["a", "b", "3", "4"],
        svec!["c", "d", "1", "2"],
        svec!["c", "d", "3", "4"],
    ];
    assert_eq!(got, expected);
}
| 30.759494 | 90 | 0.472428 |
bb6c69c6fd362c7b4b486b8d90764cc53b696040 | 2,310 | use std::collections::BTreeMap;
/// Ordered MultiSet
#[derive(Debug, Clone)]
pub struct BMultiSet<T> {
pub inner_map: BTreeMap<T, usize>,
}
impl<T: Ord> Default for BMultiSet<T> {
fn default() -> Self {
Self::new()
}
}
impl<T: Ord> BMultiSet<T> {
    /// Creates an empty multiset.
    pub fn new() -> Self {
        Self {
            inner_map: BTreeMap::new(),
        }
    }
    /// Inserts one occurrence of `x`.
    pub fn insert(&mut self, x: T) {
        *self.inner_map.entry(x).or_insert(0) += 1;
    }
    /// Decrement count of the value.
    /// If count reaches zero, remove this value entirely.
    /// Returns `None` when `x` was not present.
    pub fn erase_one(&mut self, x: T) -> Option<T> {
        if let Some(count) = self.inner_map.get_mut(&x) {
            *count -= 1;
            if *count == 0 {
                self.inner_map.remove(&x);
            }
            Some(x)
        } else {
            None
        }
    }
    /// Return the occurrence count of `x`, or `None` if absent.
    pub fn count(&self, x: &T) -> Option<&usize> {
        self.inner_map.get(x)
    }
    /// Remove value regardless of count.
    pub fn erase_all(&mut self, x: T) -> Option<T> {
        self.inner_map.remove(&x);
        Some(x)
    }
    /// Smallest stored value, if any.
    pub fn min(&self) -> Option<&T> {
        self.inner_map.keys().next()
    }
    /// Largest stored value, if any.
    /// Uses `next_back()` (O(log n)) instead of `iter().last()`, which
    /// walks the whole map.
    pub fn max(&self) -> Option<&T> {
        self.inner_map.keys().next_back()
    }
    /// `true` when no values are stored at all.
    pub fn is_empty(&self) -> bool {
        self.inner_map.is_empty()
    }
    /// Iterates over the *distinct* values in ascending order.
    pub fn iter(&self) -> std::collections::btree_map::Keys<'_, T, usize> {
        self.inner_map.keys()
    }
    /// `true` if at least one occurrence of `x` is stored.
    pub fn contains(&self, x: &T) -> bool {
        self.inner_map.contains_key(x)
    }
    /// Number of *distinct* values (not the total occurrence count).
    pub fn len(&self) -> usize {
        self.inner_map.len()
    }
    /// Removes every value.
    pub fn clear(&mut self) {
        self.inner_map.clear()
    }
}
/// Iterator returned by `BMultiSet::find`: yields the same value a fixed
/// number of times (once per stored occurrence).
pub struct FindIterator<T> {
    // number of items yielded so far
    now: usize,
    // total number of items to yield
    limit: usize,
    // the value being repeated
    val: T,
}
impl<'a, T> Iterator for FindIterator<&'a T> {
    type Item = &'a T;
    /// Yields the stored reference until `limit` items have been produced.
    fn next(&mut self) -> Option<Self::Item> {
        if self.now >= self.limit {
            return None;
        }
        self.now += 1;
        Some(self.val)
    }
}
impl<T: Ord> BMultiSet<T> {
    /// Returns an iterator that yields `x` once per stored occurrence
    /// (zero times when `x` is absent).
    pub fn find(&self, x: T) -> FindIterator<T> {
        let occurrences = self.count(&x).copied().unwrap_or(0);
        FindIterator {
            now: 0,
            limit: occurrences,
            val: x,
        }
    }
}
| 21.192661 | 75 | 0.495238 |
fc7236a353488113340f94bf06628fefea8e06c3 | 3,187 | #[doc = "Register `PRINCE_REGION1_BODY0` reader"]
// NOTE(review): svd2rust-generated register accessors for PRINCE_REGION1_BODY0.
// Regenerate from the SVD instead of editing by hand.
pub struct R(crate::R<PRINCE_REGION1_BODY0_SPEC>);
impl core::ops::Deref for R {
    type Target = crate::R<PRINCE_REGION1_BODY0_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl core::convert::From<crate::R<PRINCE_REGION1_BODY0_SPEC>> for R {
    fn from(reader: crate::R<PRINCE_REGION1_BODY0_SPEC>) -> Self {
        R(reader)
    }
}
#[doc = "Register `PRINCE_REGION1_BODY0` writer"]
pub struct W(crate::W<PRINCE_REGION1_BODY0_SPEC>);
impl core::ops::Deref for W {
    type Target = crate::W<PRINCE_REGION1_BODY0_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl core::ops::DerefMut for W {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl core::convert::From<crate::W<PRINCE_REGION1_BODY0_SPEC>> for W {
    fn from(writer: crate::W<PRINCE_REGION1_BODY0_SPEC>) -> Self {
        W(writer)
    }
}
// The whole 32-bit register is exposed as a single field `FIELD`.
#[doc = "Field `FIELD` reader - ."]
pub struct FIELD_R(crate::FieldReader<u32, u32>);
impl FIELD_R {
    pub(crate) fn new(bits: u32) -> Self {
        FIELD_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for FIELD_R {
    type Target = crate::FieldReader<u32, u32>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `FIELD` writer - ."]
pub struct FIELD_W<'a> {
    w: &'a mut W,
}
impl<'a> FIELD_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u32) -> &'a mut W {
        self.w.bits = (self.w.bits & !0xffff_ffff) | (value as u32 & 0xffff_ffff);
        self.w
    }
}
impl R {
    #[doc = "Bits 0:31 - ."]
    #[inline(always)]
    pub fn field(&self) -> FIELD_R {
        FIELD_R::new((self.bits & 0xffff_ffff) as u32)
    }
}
impl W {
    #[doc = "Bits 0:31 - ."]
    #[inline(always)]
    pub fn field(&mut self) -> FIELD_W {
        FIELD_W { w: self }
    }
    #[doc = "Writes raw bits to the register."]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.0.bits(bits);
        self
    }
}
#[doc = ".\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [prince_region1_body0](index.html) module"]
pub struct PRINCE_REGION1_BODY0_SPEC;
impl crate::RegisterSpec for PRINCE_REGION1_BODY0_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [prince_region1_body0::R](R) reader structure"]
impl crate::Readable for PRINCE_REGION1_BODY0_SPEC {
    type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [prince_region1_body0::W](W) writer structure"]
impl crate::Writable for PRINCE_REGION1_BODY0_SPEC {
    type Writer = W;
}
#[doc = "`reset()` method sets PRINCE_REGION1_BODY0 to value 0"]
impl crate::Resettable for PRINCE_REGION1_BODY0_SPEC {
    #[inline(always)]
    fn reset_value() -> Self::Ux {
        0
    }
}
| 31.87 | 402 | 0.626922 |
673c3ceadfd17144c46490d39d8c74c911b8b9c8 | 1,828 | use super::style::SharedTheme;
use std::convert::TryFrom;
use tui::{
backend::Backend,
buffer::Buffer,
layout::{Margin, Rect},
style::Style,
symbols::{block::FULL, line::THICK_VERTICAL},
widgets::Widget,
Frame,
};
/// Vertical scrollbar rendered along the right edge of an area.
struct Scrollbar {
    /// Maximum scroll position (total range).
    max: u16,
    /// Current scroll position.
    pos: u16,
    /// Style used for the bar track.
    style_bar: Style,
    /// Style used for the position marker.
    style_pos: Style,
}
impl Scrollbar {
    /// Builds a scrollbar for a range of `max` items with the marker at
    /// `pos`; values that do not fit in `u16` fall back to 0.
    fn new(max: usize, pos: usize) -> Self {
        let max = u16::try_from(max).unwrap_or_default();
        let pos = u16::try_from(pos).unwrap_or_default();
        Self {
            max,
            pos,
            style_bar: Style::default(),
            style_pos: Style::default(),
        }
    }
}
impl Widget for Scrollbar {
    fn render(self, area: Rect, buf: &mut Buffer) {
        // Column the bar occupies: one cell inside the right edge.
        let bar_x = area.right().saturating_sub(1);
        if bar_x <= area.left() {
            return;
        }
        // Leave one row of padding above and below the track.
        let inner = area.inner(&Margin {
            horizontal: 0,
            vertical: 1,
        });
        // Skip drawing when the track is too short to be useful or the
        // content already fits within the visible height.
        if inner.height < 4 || inner.height > self.max {
            return;
        }
        // Draw the track.
        for row in inner.top()..inner.bottom() {
            buf.set_string(bar_x, row, THICK_VERTICAL, self.style_bar);
        }
        // Place the position marker proportionally along the track.
        let fraction = f32::from(self.pos) / f32::from(self.max);
        let offset = f32::from(inner.height.saturating_sub(1)) * fraction;
        //TODO: any better way for this?
        #[allow(clippy::cast_sign_loss)]
        #[allow(clippy::cast_possible_truncation)]
        let offset = offset as u16;
        buf.set_string(bar_x, inner.top() + offset, FULL, self.style_pos);
    }
}
/// Renders a scrollbar inside `r` for `max` items with the marker at `pos`,
/// styling the marker from the theme.
pub fn draw_scrollbar<B: Backend>(
    f: &mut Frame<B>,
    r: Rect,
    theme: &SharedTheme,
    max: usize,
    pos: usize,
) {
    let mut bar = Scrollbar::new(max, pos);
    bar.style_pos = theme.scroll_bar_pos();
    f.render_widget(bar, r);
}
| 23.435897 | 70 | 0.551969 |
ebca5198fb5d4b576a495c01633cf09c69b8da11 | 678 | use std::net::TcpStream;
use std::io::Read;
fn main() {
    // Connect to the local server, drain the stream, and print the
    // payload as UTF-8. Any failure aborts with a descriptive panic.
    let mut stream = match TcpStream::connect("127.0.0.1:2342") {
        Ok(s) => s,
        Err(e) => panic!("Failed to connect: {}", e),
    };
    let mut data: Vec<u8> = vec![];
    if let Err(e) = stream.read_to_end(&mut data) {
        panic!("Failed to get data: {}", e);
    }
    // Convert bytes in vector to string
    match String::from_utf8(data) {
        Ok(s) => println!("{} ", s),
        Err(e) => panic!("Failed to convert: {}", e),
    }
}
| 30.818182 | 69 | 0.39233 |
01b676c571b7c705d150906ccce45d3d5b5ea88e | 5,067 | use std::ffi::CString;
use std::io;
use std::path::Path;
use std::marker::PhantomData;
use libc::{c_int, size_t, c_char};
use libc::c_void;
use crate::get_error;
use std::mem::transmute;
use crate::sys;
/// A structure that provides an abstract interface to stream I/O.
///
/// RAII wrapper around a raw `SDL_RWops` pointer; the lifetime ties the
/// stream to any borrowed buffer it was created from (see `from_bytes`).
pub struct RWops<'a> {
    raw: *mut sys::SDL_RWops,
    _marker: PhantomData<&'a ()>
}
impl<'a> RWops<'a> {
    /// Returns the underlying raw `SDL_RWops` pointer.
    pub unsafe fn raw(&self) -> *mut sys::SDL_RWops { self.raw }
    /// Wraps an existing raw pointer; the wrapper takes over closing it on drop.
    pub unsafe fn from_ll<'b>(raw: *mut sys::SDL_RWops) -> RWops<'b> {
        RWops {
            raw,
            _marker: PhantomData
        }
    }
    /// Creates an SDL file stream.
    ///
    /// # Errors
    /// Returns `Err` when SDL fails to open the file, and also (instead of
    /// panicking, as the previous `unwrap()`s did) when the path is not
    /// valid UTF-8 or the path/mode contain interior NUL bytes.
    pub fn from_file<P: AsRef<Path>>(path: P, mode: &str) -> Result<RWops<'static>, String> {
        let path_str = path
            .as_ref()
            .to_str()
            .ok_or_else(|| "path is not valid UTF-8".to_string())?;
        let path_c = CString::new(path_str).map_err(|e| e.to_string())?;
        let mode_c = CString::new(mode).map_err(|e| e.to_string())?;
        let raw = unsafe {
            sys::SDL_RWFromFile(path_c.as_ptr() as *const c_char, mode_c.as_ptr() as *const c_char)
        };
        if raw.is_null() {
            Err(get_error())
        } else {
            Ok(RWops {
                raw,
                _marker: PhantomData
            })
        }
    }
    /// Prepares a read-only memory buffer for use with `RWops`.
    ///
    /// This method can only fail if the buffer size is zero.
    pub fn from_bytes(buf: &'a [u8]) -> Result<RWops<'a>, String> {
        let raw = unsafe {
            sys::SDL_RWFromConstMem(buf.as_ptr() as *const c_void, buf.len() as c_int)
        };
        if raw.is_null() {
            Err(get_error())
        } else {
            Ok(RWops {
                raw,
                _marker: PhantomData
            })
        }
    }
    /// Reads a `Read` object into a buffer and then passes it to `RWops::from_bytes`.
    ///
    /// The buffer must be provided to this function and must live as long as the
    /// `RWops`, but the `RWops` does not take ownership of it.
    pub fn from_read<T>(r: &mut T, buffer: &'a mut Vec<u8>) -> Result<RWops<'a>, String>
    where T: io::Read + Sized {
        match r.read_to_end(buffer) {
            Ok(_size) => RWops::from_bytes(buffer),
            Err(ioerror) => {
                let msg = format!("IO error: {}", ioerror);
                Err(msg)
            }
        }
    }
    /// Prepares a read-write memory buffer for use with `RWops`.
    ///
    /// This method can only fail if the buffer size is zero.
    pub fn from_bytes_mut(buf: &'a mut [u8]) -> Result<RWops<'a>, String> {
        // Use `as_mut_ptr` for the writable buffer; the old code cast the
        // `*const` pointer from `as_ptr()` to `*mut`.
        let raw = unsafe {
            sys::SDL_RWFromMem(buf.as_mut_ptr() as *mut c_void, buf.len() as c_int)
        };
        if raw.is_null() {
            Err(get_error())
        } else {
            Ok(RWops {
                raw,
                _marker: PhantomData
            })
        }
    }
    /// Gets the stream's total size in bytes.
    ///
    /// Returns `None` if the stream size can't be determined
    /// (either because it doesn't make sense for the stream type, or there was an error).
    pub fn len(&self) -> Option<usize> {
        let result = unsafe { ((*self.raw).size.unwrap())(self.raw) };
        match result {
            -1 => None,
            v => Some(v as usize)
        }
    }
    /// Tells if the stream is empty; a stream of unknown size counts as empty.
    pub fn is_empty(&self) -> bool {
        match self.len() {
            Some(s) => s == 0,
            None => true,
        }
    }
}
impl<'a> Drop for RWops<'a> {
fn drop(&mut self) {
let ret = unsafe { ((*self.raw).close.unwrap())(self.raw) };
if ret != 0 {
panic!(get_error());
}
}
}
impl<'a> io::Read for RWops<'a> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let out_len = buf.len() as size_t;
        // Resolves the old FIXME: the destination buffer is written to, so it
        // must come from `as_mut_ptr()` — casting `as_ptr()` (a `*const`)
        // to `*mut` is unsound.
        // The callback returns the number of objects read, or 0 at error or
        // end of file.
        let ret = unsafe {
            ((*self.raw).read.unwrap())(self.raw, buf.as_mut_ptr() as *mut c_void, 1, out_len as u64)
        };
        Ok(ret as usize)
    }
}
impl<'a> io::Write for RWops<'a> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Write `buf.len()` one-byte objects; the callback reports how many
        // were actually written.
        let len = buf.len() as size_t;
        let written = unsafe {
            ((*self.raw).write.unwrap())(self.raw, buf.as_ptr() as *const c_void, 1, len as u64)
        };
        Ok(written as usize)
    }
    fn flush(&mut self) -> io::Result<()> {
        // Nothing is buffered on our side, so flushing is a no-op.
        Ok(())
    }
}
impl<'a> io::Seek for RWops<'a> {
    // Maps std's `SeekFrom` onto SDL's whence constants + signed offset.
    fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
        // whence code is different from SeekStyle
        let (whence, offset) = match pos {
            io::SeekFrom::Start(pos) => (sys::RW_SEEK_SET, pos as i64),
            io::SeekFrom::End(pos) => (sys::RW_SEEK_END, pos),
            io::SeekFrom::Current(pos) => (sys::RW_SEEK_CUR, pos)
        };
        // NOTE(review): `transmute(whence)` reinterprets the whence constant
        // as the callback's integer parameter type; a plain `as` cast would
        // be clearer — confirm the exact FFI types before changing.
        let ret = unsafe {
            ((*self.raw).seek.unwrap())(self.raw, offset, transmute(whence))
        };
        // -1 signals an error; otherwise `ret` is the new stream offset.
        if ret == -1 {
            Err(io::Error::last_os_error())
        } else {
            Ok(ret as u64)
        }
    }
}
| 29.459302 | 99 | 0.512927 |
4b7644d5c61246c59a812ef562405d9322271717 | 1,014 | /*
*
*
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* The version of the OpenAPI document: 1.0.0
*
* Generated by: https://openapi-generator.tech
*/
// Generated OpenAPI model: every field is optional and is omitted from the
// serialized JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LolCatalogGameDataWardSkin {
    #[serde(rename = "description", skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "id", skip_serializing_if = "Option::is_none")]
    pub id: Option<i64>,
    #[serde(rename = "name", skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "wardImagePath", skip_serializing_if = "Option::is_none")]
    pub ward_image_path: Option<String>,
}
impl LolCatalogGameDataWardSkin {
pub fn new() -> LolCatalogGameDataWardSkin {
LolCatalogGameDataWardSkin {
description: None,
id: None,
name: None,
ward_image_path: None,
}
}
}
| 26.684211 | 109 | 0.658777 |
eba9e602b5957c20a9efe4018de4efc691954d5f | 10,887 | use crate::BPFError;
use solana_rbpf::ebpf;
use thiserror::Error;
/// Error definitions
#[derive(Debug, Error)]
pub enum VerifierError {
    /// ProgramLengthNotMultiple
    #[error("program length must be a multiple of {} octets", ebpf::INSN_SIZE)]
    ProgramLengthNotMultiple,
    /// ProgramTooLarge
    #[error("program too big, max {}, is {}", ebpf::PROG_MAX_INSNS, .0)]
    ProgramTooLarge(usize),
    /// NoProgram
    #[error("no program set, call prog_set() to load one")]
    NoProgram,
    /// DivisionByZero
    #[error("division by 0 (insn #{0})")]
    DivisionByZero(usize),
    /// UnsupportedLEBEArgument
    #[error("unsupported argument for LE/BE (insn #{0})")]
    UnsupportedLEBEArgument(usize),
    /// LDDWCannotBeLast
    #[error("LD_DW instruction cannot be last in program")]
    LDDWCannotBeLast,
    /// IncompleteLDDW
    #[error("incomplete LD_DW instruction (insn #{0})")]
    IncompleteLDDW(usize),
    /// InfiniteLoop
    #[error("infinite loop (insn #{0})")]
    InfiniteLoop(usize),
    /// JumpOutOfCode
    #[error("jump out of code to #{0} (insn #{1})")]
    JumpOutOfCode(usize, usize),
    /// JumpToMiddleOfLDDW
    #[error("jump to middle of LD_DW at #{0} (insn #{1})")]
    JumpToMiddleOfLDDW(usize, usize),
    /// InvalidSourceRegister
    #[error("invalid source register (insn #{0})")]
    InvalidSourceRegister(usize),
    /// CannotWriteR10
    #[error("cannot write into register r10 (insn #{0})")]
    CannotWriteR10(usize),
    /// InvalidDestinationRegister
    #[error("invalid destination register (insn #{0})")]
    InvalidDestinationRegister(usize),
    /// UnknownOpCode
    #[error("unknown eBPF opcode {0:#2x} (insn #{1:?})")]
    UnknownOpCode(u8, usize),
    /// Shift with overflow
    #[error("Shift with overflow at instruction {0}")]
    ShiftWithOverflow(usize),
    /// Invalid register specified
    #[error("Invalid register specified at instruction {0}")]
    InvalidRegister(usize),
}
/// The program must be a whole number of 8-byte instructions, non-empty,
/// and no larger than the maximum program size.
fn check_prog_len(prog: &[u8]) -> Result<(), BPFError> {
    let len = prog.len();
    if len % ebpf::INSN_SIZE != 0 {
        return Err(VerifierError::ProgramLengthNotMultiple.into());
    }
    if len > ebpf::PROG_MAX_SIZE {
        return Err(VerifierError::ProgramTooLarge(len / ebpf::INSN_SIZE).into());
    }
    match len {
        0 => Err(VerifierError::NoProgram.into()),
        _ => Ok(()),
    }
}
/// Rejects a zero immediate (used for DIV/MOD/MUL immediates).
fn check_imm_nonzero(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), BPFError> {
    match insn.imm {
        0 => Err(VerifierError::DivisionByZero(insn_ptr).into()),
        _ => Ok(()),
    }
}
/// LE/BE instructions only accept operand widths of 16, 32 or 64 bits.
fn check_imm_endian(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), BPFError> {
    if matches!(insn.imm, 16 | 32 | 64) {
        Ok(())
    } else {
        Err(VerifierError::UnsupportedLEBEArgument(insn_ptr).into())
    }
}
/// LD_DW occupies two instruction slots; verify the second slot exists and
/// is the zero-opcode continuation word.
fn check_load_dw(prog: &[u8], insn_ptr: usize) -> Result<(), BPFError> {
    // Off-by-one fix: the verifier loop guarantees `insn_ptr < count`, so the
    // old bound (`insn_ptr >= count`) could never trigger and
    // `get_insn(prog, insn_ptr + 1)` would read past the end when LD_DW was
    // the final instruction. The second slot must be in range, i.e.
    // `insn_ptr + 1 < count`.
    if insn_ptr + 1 >= (prog.len() / ebpf::INSN_SIZE) {
        // Last instruction cannot be LD_DW because there would be no 2nd DW
        return Err(VerifierError::LDDWCannotBeLast.into());
    }
    let next_insn = ebpf::get_insn(prog, insn_ptr + 1);
    if next_insn.opc != 0 {
        return Err(VerifierError::IncompleteLDDW(insn_ptr).into());
    }
    Ok(())
}
/// Validates a jump target: not a self-loop, inside the program, and not
/// landing on the second slot of an LD_DW.
fn check_jmp_offset(prog: &[u8], insn_ptr: usize) -> Result<(), BPFError> {
    let insn = ebpf::get_insn(prog, insn_ptr);
    // Offset -1 jumps back onto the jump itself: an infinite loop.
    if insn.off == -1 {
        return Err(VerifierError::InfiniteLoop(insn_ptr).into());
    }
    let insn_count = prog.len() / ebpf::INSN_SIZE;
    let target = insn_ptr as isize + 1 + insn.off as isize;
    if target < 0 || target as usize >= insn_count {
        return Err(VerifierError::JumpOutOfCode(target as usize, insn_ptr).into());
    }
    // Opcode 0 marks the continuation word of an LD_DW.
    if ebpf::get_insn(prog, target as usize).opc == 0 {
        return Err(VerifierError::JumpToMiddleOfLDDW(target as usize, insn_ptr).into());
    }
    Ok(())
}
/// Registers r0-r10 are valid sources; r0-r9 are always valid destinations,
/// while r10 may only be a destination for store instructions.
fn check_registers(insn: &ebpf::Insn, store: bool, insn_ptr: usize) -> Result<(), BPFError> {
    if insn.src > 10 {
        return Err(VerifierError::InvalidSourceRegister(insn_ptr).into());
    }
    match insn.dst {
        0..=9 => Ok(()),
        10 if store => Ok(()),
        10 => Err(VerifierError::CannotWriteR10(insn_ptr).into()),
        _ => Err(VerifierError::InvalidDestinationRegister(insn_ptr).into()),
    }
}
/// Check that the imm is a valid shift operand (must lie in `0..64`).
fn check_imm_shift(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), VerifierError> {
    match insn.imm {
        0..=63 => Ok(()),
        _ => Err(VerifierError::ShiftWithOverflow(insn_ptr)),
    }
}
/// Check that the imm is a valid register number (r0-r10); used by
/// CALL_REG, which encodes its target register in the immediate.
fn check_imm_register(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), VerifierError> {
    if (0..=10).contains(&insn.imm) {
        Ok(())
    } else {
        Err(VerifierError::InvalidRegister(insn_ptr))
    }
}
/// Statically verifies a raw eBPF program: overall length, opcode validity,
/// immediate/offset constraints, and source/destination register usage.
/// Returns the first violation found.
#[rustfmt::skip]
pub fn check(prog: &[u8]) -> Result<(), BPFError> {
    check_prog_len(prog)?;
    let mut insn_ptr: usize = 0;
    while insn_ptr * ebpf::INSN_SIZE < prog.len() {
        let insn = ebpf::get_insn(prog, insn_ptr);
        // `store` marks instructions allowed to use r10 as a destination.
        let mut store = false;
        match insn.opc {
            // BPF_LD class
            ebpf::LD_ABS_B => {},
            ebpf::LD_ABS_H => {},
            ebpf::LD_ABS_W => {},
            ebpf::LD_ABS_DW => {},
            ebpf::LD_IND_B => {},
            ebpf::LD_IND_H => {},
            ebpf::LD_IND_W => {},
            ebpf::LD_IND_DW => {},
            ebpf::LD_DW_IMM => {
                store = true;
                check_load_dw(prog, insn_ptr)?;
                // LD_DW occupies two slots; skip the continuation word.
                insn_ptr += 1;
            },
            // BPF_LDX class
            ebpf::LD_B_REG => {},
            ebpf::LD_H_REG => {},
            ebpf::LD_W_REG => {},
            ebpf::LD_DW_REG => {},
            // BPF_ST class
            ebpf::ST_B_IMM => store = true,
            ebpf::ST_H_IMM => store = true,
            ebpf::ST_W_IMM => store = true,
            ebpf::ST_DW_IMM => store = true,
            // BPF_STX class
            ebpf::ST_B_REG => store = true,
            ebpf::ST_H_REG => store = true,
            ebpf::ST_W_REG => store = true,
            ebpf::ST_DW_REG => store = true,
            // BPF_ALU class
            ebpf::ADD32_IMM => {},
            ebpf::ADD32_REG => {},
            ebpf::SUB32_IMM => {},
            ebpf::SUB32_REG => {},
            ebpf::MUL32_IMM => {},
            ebpf::MUL32_REG => {},
            ebpf::DIV32_IMM => { check_imm_nonzero(&insn, insn_ptr)?; },
            ebpf::DIV32_REG => {},
            ebpf::OR32_IMM => {},
            ebpf::OR32_REG => {},
            ebpf::AND32_IMM => {},
            ebpf::AND32_REG => {},
            ebpf::LSH32_IMM => { check_imm_shift(&insn, insn_ptr)?; },
            ebpf::LSH32_REG => {},
            ebpf::RSH32_IMM => { check_imm_shift(&insn, insn_ptr)?; },
            ebpf::RSH32_REG => {},
            ebpf::NEG32 => {},
            ebpf::MOD32_IMM => { check_imm_nonzero(&insn, insn_ptr)?; },
            ebpf::MOD32_REG => {},
            ebpf::XOR32_IMM => {},
            ebpf::XOR32_REG => {},
            ebpf::MOV32_IMM => {},
            ebpf::MOV32_REG => {},
            ebpf::ARSH32_IMM => { check_imm_shift(&insn, insn_ptr)?; },
            ebpf::ARSH32_REG => {},
            ebpf::LE => { check_imm_endian(&insn, insn_ptr)?; },
            ebpf::BE => { check_imm_endian(&insn, insn_ptr)?; },
            // BPF_ALU64 class
            ebpf::ADD64_IMM => {},
            ebpf::ADD64_REG => {},
            ebpf::SUB64_IMM => {},
            ebpf::SUB64_REG => {},
            ebpf::MUL64_IMM => { check_imm_nonzero(&insn, insn_ptr)?; },
            ebpf::MUL64_REG => {},
            ebpf::DIV64_IMM => { check_imm_nonzero(&insn, insn_ptr)?; },
            ebpf::DIV64_REG => {},
            ebpf::OR64_IMM => {},
            ebpf::OR64_REG => {},
            ebpf::AND64_IMM => {},
            ebpf::AND64_REG => {},
            ebpf::LSH64_IMM => { check_imm_shift(&insn, insn_ptr)?; },
            ebpf::LSH64_REG => {},
            ebpf::RSH64_IMM => { check_imm_shift(&insn, insn_ptr)?; },
            ebpf::RSH64_REG => {},
            ebpf::NEG64 => {},
            ebpf::MOD64_IMM => { check_imm_nonzero(&insn, insn_ptr)?; },
            ebpf::MOD64_REG => {},
            ebpf::XOR64_IMM => {},
            ebpf::XOR64_REG => {},
            ebpf::MOV64_IMM => {},
            ebpf::MOV64_REG => {},
            ebpf::ARSH64_IMM => { check_imm_shift(&insn, insn_ptr)?; },
            ebpf::ARSH64_REG => {},
            // BPF_JMP class
            ebpf::JA => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JEQ_IMM => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JEQ_REG => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JGT_IMM => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JGT_REG => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JGE_IMM => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JGE_REG => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JLT_IMM => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JLT_REG => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JLE_IMM => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JLE_REG => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JSET_IMM => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JSET_REG => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JNE_IMM => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JNE_REG => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JSGT_IMM => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JSGT_REG => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JSGE_IMM => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JSGE_REG => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JSLT_IMM => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JSLT_REG => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JSLE_IMM => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::JSLE_REG => { check_jmp_offset(prog, insn_ptr)?; },
            ebpf::CALL_IMM => {},
            ebpf::CALL_REG => { check_imm_register(&insn, insn_ptr)?; },
            ebpf::EXIT => {},
            _ => {
                return Err(VerifierError::UnknownOpCode(insn.opc, insn_ptr).into());
            }
        }
        check_registers(&insn, store, insn_ptr)?;
        insn_ptr += 1;
    }
    // insn_ptr should now be equal to number of instructions.
    if insn_ptr != prog.len() / ebpf::INSN_SIZE {
        return Err(VerifierError::JumpOutOfCode(insn_ptr, insn_ptr).into());
    }
    Ok(())
}
| 37.933798 | 94 | 0.542666 |
6169677bf9c8d97823ed75965d3083c88605b459 | 3,923 | // stats - DnDice
// URL: https://github.com/pennbauman/dndice-rs
// Author:
// Penn Bauman ([email protected])
use crate::dice::Die;
/// A set of six ability scores
///
/// Scores are stored sorted in descending order; `new` guarantees every
/// value is within 1..=30.
#[derive(Debug)]
pub struct Scores {
    nums: [u8; 6],
}
impl Scores {
    /// Validates six raw scores and stores them sorted in descending order.
    ///
    /// # Panics
    /// Panics if any score is 0 or greater than 30.
    fn new(mut nums_array: [u8; 6]) -> Self {
        for s in &nums_array {
            // `u8` cannot be negative, so the old `*s <= 0` comparison was
            // really `== 0` (clippy: absurd_extreme_comparisons).
            if (*s == 0) || (*s > 30) {
                panic!("impossible stat")
            }
        }
        nums_array.sort();
        nums_array.reverse();
        Self { nums: nums_array }
    }
    /// Return scores generated with a method based on the string given
    ///
    /// # Methods
    /// * `std` or `standard`: use std()
    /// * `d20` or `1d20`: use d20()
    /// * `4d6` or `3d6`: use lowest3_4d6()
    pub fn from<S: ToString>(method: S) -> Result<Self, ()> {
        match method.to_string().as_str() {
            "std" | "standard" => Ok(Self::std()),
            "d20" | "1d20" => Ok(Self::d20()),
            "4d6" | "3d6" => Ok(Self::lowest3_4d6()),
            _ => Err(()),
        }
    }
    /// Return the 5th edition D&D standard ability scores
    pub fn std() -> Self {
        Self::new([15, 14, 13, 12, 10, 8])
    }
    /// Return scores generated by rolling 1d20 for each score
    pub fn d20() -> Self {
        let dice = Die::new(1, 20);
        let mut stats = [0; 6];
        for stat in stats.iter_mut() {
            *stat = dice.roll().num().try_into().unwrap();
        }
        Self::new(stats)
    }
    /// Return scores generated by rolling 4d6 and using the sum of the
    /// highest 3 numbers rolled for each score
    pub fn lowest3_4d6() -> Self {
        let dice = Die::new(1, 6);
        let mut stats = [0; 6];
        for stat in stats.iter_mut() {
            // Track the smallest die seen so far; the other three rolls
            // are accumulated into `sum`.
            let mut sum = 0;
            let mut min = dice.roll().num();
            for _ in 0..3 {
                let roll = dice.roll().num();
                if roll >= min {
                    sum += roll;
                } else {
                    sum += min;
                    min = roll;
                }
            }
            *stat = sum.try_into().unwrap();
        }
        Self::new(stats)
    }
}
impl std::fmt::Display for Scores {
    /// Formats the six scores as space-separated, width-2 columns.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let n = &self.nums;
        write!(
            f,
            "{:2} {:2} {:2} {:2} {:2} {:2}",
            n[0], n[1], n[2], n[3], n[4], n[5]
        )
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Samples `method` several times (generation is random) and asserts
    /// that every score lies within `lo..=hi`.
    fn assert_scores_in_range(method: &str, lo: u8, hi: u8) {
        for _ in 1..10 {
            let nums = Scores::from(method).unwrap().nums;
            for n in nums.iter() {
                assert!(*n >= lo);
                assert!(*n <= hi);
            }
        }
    }
    #[test]
    fn test_scores_std() {
        let expected = [15, 14, 13, 12, 10, 8];
        assert_eq!(expected, Scores::from("std").unwrap().nums);
        assert_eq!(expected, Scores::from("standard").unwrap().nums);
    }
    #[test]
    fn test_scores_1d20() {
        assert_scores_in_range("1d20", 1, 20);
    }
    #[test]
    fn test_scores_d20() {
        assert_scores_in_range("d20", 1, 20);
    }
    #[test]
    fn test_scores_4d6() {
        assert_scores_in_range("4d6", 3, 18);
    }
    #[test]
    fn test_scores_3d6() {
        assert_scores_in_range("3d6", 3, 18);
    }
}
| 28.021429 | 110 | 0.441499 |
e4b5d49bc1fe5e2358d490bc74d18b05a16cc308 | 4,962 | mod array_emitter;
mod ast_emitter;
mod block_emitter;
mod compilation_info;
mod control_structures;
mod dis;
mod emitter;
mod emitter_scope;
mod expression_emitter;
mod function_declaration_emitter;
mod object_emitter;
mod reference_op_emitter;
mod script_emitter;
extern crate jsparagus_ast as ast;
extern crate jsparagus_scope as scope;
extern crate jsparagus_stencil as stencil;
pub use crate::emitter::{EmitError, EmitOptions};
pub use dis::dis;
use crate::compilation_info::CompilationInfo;
use ast::source_atom_set::SourceAtomSet;
use ast::source_slice_list::SourceSliceList;
use scope::{ScopeBuildError, ScopePassResult};
use stencil::result::EmitResult;
pub fn emit<'alloc>(
ast: &'alloc ast::types::Program<'alloc>,
options: &EmitOptions,
atoms: SourceAtomSet<'alloc>,
slices: SourceSliceList<'alloc>,
) -> Result<EmitResult<'alloc>, EmitError> {
let ScopePassResult {
scope_data_map,
function_declarations,
function_stencil_indices,
function_declaration_properties,
scripts,
error,
} = scope::generate_scope_data(ast);
// Error case for scope analysis will be removed once all syntax is
// supported. Use field instead of Result type here for simplicity.
match error {
Some(ScopeBuildError::NotImplemented(s)) => {
return Err(EmitError::NotImplemented(s));
}
None => {}
}
let compilation_info = CompilationInfo::new(
atoms,
slices,
scope_data_map,
function_declarations,
function_stencil_indices,
function_declaration_properties,
scripts,
);
ast_emitter::emit_program(ast, options, compilation_info)
}
#[cfg(test)]
mod tests {
    extern crate jsparagus_parser as parser;
    use super::{emit, EmitOptions};
    use crate::dis::*;
    use ast::source_atom_set::SourceAtomSet;
    use ast::source_slice_list::SourceSliceList;
    use bumpalo::Bump;
    use parser::{parse_script, ParseOptions};
    use std::cell::RefCell;
    use std::convert::TryInto;
    use std::rc::Rc;
    use stencil::opcode::*;
    use stencil::script::SourceExtent;
    /// Parses `source` as a top-level script, runs the emitter, and returns
    /// the raw bytecode of the outermost script. The disassembly is printed
    /// so that failing tests show what was actually emitted.
    fn bytecode(source: &str) -> Vec<u8> {
        let alloc = &Bump::new();
        let parse_options = ParseOptions::new();
        let atoms = Rc::new(RefCell::new(SourceAtomSet::new()));
        let slices = Rc::new(RefCell::new(SourceSliceList::new()));
        let source_len = source.len();
        let parse_result =
            parse_script(alloc, source, &parse_options, atoms.clone(), slices.clone())
                .expect("Failed to parse");
        // println!("{:?}", parse_result);
        let extent = SourceExtent::top_level_script(source_len.try_into().unwrap(), 1, 0);
        let emit_options = EmitOptions::new(extent);
        let result = emit(
            alloc.alloc(ast::types::Program::Script(parse_result.unbox())),
            &emit_options,
            atoms.replace(SourceAtomSet::new_uninitialized()),
            slices.replace(SourceSliceList::new()),
        )
        .expect("Should work!");
        // scripts[0] is the top-level script; its immutable data holds the
        // bytecode we want to compare against the expected fixtures.
        let script_data_index: usize = result.scripts[0]
            .immutable_script_data
            .expect("Top level script should have ImmutableScriptData")
            .into();
        let script_data = &result.script_data_list[script_data_index];
        let bytecode = &script_data.bytecode;
        println!("{}", dis(&bytecode));
        bytecode.to_vec()
    }
    #[test]
    fn it_works() {
        assert_eq!(
            bytecode("2 + 2"),
            vec![
                Opcode::Int8 as u8,
                2,
                Opcode::Int8 as u8,
                2,
                Opcode::Add as u8,
                Opcode::SetRval as u8,
                Opcode::RetRval as u8,
            ]
        )
    }
    #[test]
    fn dis_call() {
        assert_eq!(
            bytecode("dis()"),
            vec![
                Opcode::GetGName as u8,
                1,
                0,
                0,
                0,
                Opcode::Undefined as u8,
                Opcode::Call as u8,
                0,
                0,
                Opcode::SetRval as u8,
                Opcode::RetRval as u8,
            ]
        )
    }
    #[test]
    fn literals() {
        assert_eq!(
            bytecode("true"),
            vec![
                Opcode::True as u8,
                Opcode::SetRval as u8,
                Opcode::RetRval as u8,
            ]
        );
        assert_eq!(
            bytecode("false"),
            vec![
                Opcode::False as u8,
                Opcode::SetRval as u8,
                Opcode::RetRval as u8,
            ]
        );
        // String literal support is not asserted yet; kept for reference.
        //assert_eq!(
        //    bytecode("'hello world'"),
        //    vec![
        //        Opcode::String as u8, 0, 0, 0, 0,
        //        Opcode::SetRval as u8,
        //        Opcode::RetRval as u8,
        //    ]
        //);
    }
}
| 27.876404 | 90 | 0.55784 |
29770db75fdf4ed00dbb88eded4910a9f1a0982b | 167,228 | #![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::{models, API_VERSION};
#[non_exhaustive]
#[derive(Debug, thiserror :: Error)]
#[allow(non_camel_case_types)]
// AutoRust-generated aggregate error: one transparent variant per API
// operation, each wrapping that operation's module-level error type.
// Regenerate instead of editing by hand.
pub enum Error {
    #[error(transparent)]
    Services_Get(#[from] services::get::Error),
    #[error(transparent)]
    Services_CreateOrUpdate(#[from] services::create_or_update::Error),
    #[error(transparent)]
    Services_Update(#[from] services::update::Error),
    #[error(transparent)]
    Services_Delete(#[from] services::delete::Error),
    #[error(transparent)]
    Services_ListTestKeys(#[from] services::list_test_keys::Error),
    #[error(transparent)]
    Services_RegenerateTestKey(#[from] services::regenerate_test_key::Error),
    #[error(transparent)]
    Services_DisableTestEndpoint(#[from] services::disable_test_endpoint::Error),
    #[error(transparent)]
    Services_EnableTestEndpoint(#[from] services::enable_test_endpoint::Error),
    #[error(transparent)]
    Apps_Get(#[from] apps::get::Error),
    #[error(transparent)]
    Apps_CreateOrUpdate(#[from] apps::create_or_update::Error),
    #[error(transparent)]
    Apps_Update(#[from] apps::update::Error),
    #[error(transparent)]
    Apps_Delete(#[from] apps::delete::Error),
    #[error(transparent)]
    Apps_List(#[from] apps::list::Error),
    #[error(transparent)]
    Apps_GetResourceUploadUrl(#[from] apps::get_resource_upload_url::Error),
    #[error(transparent)]
    Bindings_Get(#[from] bindings::get::Error),
    #[error(transparent)]
    Bindings_CreateOrUpdate(#[from] bindings::create_or_update::Error),
    #[error(transparent)]
    Bindings_Update(#[from] bindings::update::Error),
    #[error(transparent)]
    Bindings_Delete(#[from] bindings::delete::Error),
    #[error(transparent)]
    Bindings_List(#[from] bindings::list::Error),
    #[error(transparent)]
    Certificates_Get(#[from] certificates::get::Error),
    #[error(transparent)]
    Certificates_CreateOrUpdate(#[from] certificates::create_or_update::Error),
    #[error(transparent)]
    Certificates_Delete(#[from] certificates::delete::Error),
    #[error(transparent)]
    Certificates_List(#[from] certificates::list::Error),
    #[error(transparent)]
    Services_CheckNameAvailability(#[from] services::check_name_availability::Error),
    #[error(transparent)]
    CustomDomains_Get(#[from] custom_domains::get::Error),
    #[error(transparent)]
    CustomDomains_CreateOrUpdate(#[from] custom_domains::create_or_update::Error),
    #[error(transparent)]
    CustomDomains_Patch(#[from] custom_domains::patch::Error),
    #[error(transparent)]
    CustomDomains_Delete(#[from] custom_domains::delete::Error),
    #[error(transparent)]
    CustomDomains_List(#[from] custom_domains::list::Error),
    #[error(transparent)]
    CustomDomains_Validate(#[from] custom_domains::validate::Error),
    #[error(transparent)]
    Deployments_Get(#[from] deployments::get::Error),
    #[error(transparent)]
    Deployments_CreateOrUpdate(#[from] deployments::create_or_update::Error),
    #[error(transparent)]
    Deployments_Update(#[from] deployments::update::Error),
    #[error(transparent)]
    Deployments_Delete(#[from] deployments::delete::Error),
    #[error(transparent)]
    Deployments_List(#[from] deployments::list::Error),
    #[error(transparent)]
    Deployments_ListClusterAllDeployments(#[from] deployments::list_cluster_all_deployments::Error),
    #[error(transparent)]
    Deployments_Start(#[from] deployments::start::Error),
    #[error(transparent)]
    Deployments_Stop(#[from] deployments::stop::Error),
    #[error(transparent)]
    Deployments_Restart(#[from] deployments::restart::Error),
    #[error(transparent)]
    Deployments_GetLogFileUrl(#[from] deployments::get_log_file_url::Error),
    #[error(transparent)]
    Services_ListBySubscription(#[from] services::list_by_subscription::Error),
    #[error(transparent)]
    Services_List(#[from] services::list::Error),
    #[error(transparent)]
    Operations_List(#[from] operations::list::Error),
    #[error(transparent)]
    RuntimeVersions_ListRuntimeVersions(#[from] runtime_versions::list_runtime_versions::Error),
    #[error(transparent)]
    Sku_List(#[from] sku::list::Error),
}
pub mod services {
use super::{models, API_VERSION};
    // AutoRust-generated operation: GET a single Spring service resource.
    // Builds the ARM URL, attaches a bearer token when configured, and
    // deserializes either the success body or the service's CloudError.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        service_name: &str,
    ) -> std::result::Result<models::ServiceResource, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            service_name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::ServiceResource =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            // Any non-200 status is surfaced as DefaultResponse with the
            // parsed CloudError payload.
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::CloudError =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(get::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
pub mod get {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
    /// Creates a new Azure Spring Cloud service or updates an existing one.
    ///
    /// Sends `PUT .../Microsoft.AppPlatform/Spring/{service_name}` with
    /// `resource` serialized as the JSON body. A 201 maps to
    /// [`create_or_update::Response::Created201`], a 200 to
    /// [`create_or_update::Response::Ok200`]; anything else becomes
    /// [`create_or_update::Error::DefaultResponse`].
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        service_name: &str,
        resource: &models::ServiceResource,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            service_name
        );
        let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        // Token is only attached when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create_or_update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(resource).map_err(create_or_update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(create_or_update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: models::ServiceResource = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::ServiceResource = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            status_code => {
                // Non-success: parse the CloudError body for diagnostics.
                let rsp_body = rsp.body();
                let rsp_value: models::CloudError = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(create_or_update::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Response and error types for [`create_or_update`].
    pub mod create_or_update {
        use super::{models, API_VERSION};
        /// Success variants: 201 Created or 200 OK, both carrying the resource.
        #[derive(Debug)]
        pub enum Response {
            Created201(models::ServiceResource),
            Ok200(models::ServiceResource),
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Any HTTP status other than the documented success code(s).
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Patches an existing Azure Spring Cloud service resource.
    ///
    /// Sends `PATCH .../Microsoft.AppPlatform/Spring/{service_name}` with
    /// `resource` serialized as the JSON body. A 200 maps to
    /// [`update::Response::Ok200`], a 202 to
    /// [`update::Response::Accepted202`] (both carry the returned resource);
    /// anything else becomes [`update::Error::DefaultResponse`].
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        service_name: &str,
        resource: &models::ServiceResource,
    ) -> std::result::Result<update::Response, update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            service_name
        );
        let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PATCH);
        // Token is only attached when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(resource).map_err(update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::ServiceResource =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(update::Response::Ok200(rsp_value))
            }
            http::StatusCode::ACCEPTED => {
                let rsp_body = rsp.body();
                let rsp_value: models::ServiceResource =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(update::Response::Accepted202(rsp_value))
            }
            status_code => {
                // Non-success: parse the CloudError body for diagnostics.
                let rsp_body = rsp.body();
                let rsp_value: models::CloudError =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(update::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Response and error types for [`update`].
    pub mod update {
        use super::{models, API_VERSION};
        /// Success variants: 200 OK or 202 Accepted, both carrying the resource.
        #[derive(Debug)]
        pub enum Response {
            Ok200(models::ServiceResource),
            Accepted202(models::ServiceResource),
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Any HTTP status other than the documented success code(s).
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Deletes an Azure Spring Cloud service resource.
    ///
    /// Sends `DELETE .../Microsoft.AppPlatform/Spring/{service_name}`. A 202
    /// maps to [`delete::Response::Accepted202`], a 204 to
    /// [`delete::Response::NoContent204`] (neither carries a body); anything
    /// else becomes [`delete::Error::DefaultResponse`].
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        service_name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            service_name
        );
        let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        // Token is only attached when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(delete::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                // Non-success: parse the CloudError body for diagnostics.
                let rsp_body = rsp.body();
                let rsp_value: models::CloudError =
                    serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(delete::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Response and error types for [`delete`].
    pub mod delete {
        use super::{models, API_VERSION};
        /// Success variants: 202 Accepted or 204 No Content (no payload).
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Any HTTP status other than the documented success code(s).
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Lists the test keys for an Azure Spring Cloud service.
    ///
    /// Sends `POST .../Spring/{service_name}/listTestKeys` with an empty body
    /// (and an explicit `Content-Length: 0` header, since POST bodies are
    /// otherwise expected). A 200 response deserializes into
    /// [`models::TestKeys`]; anything else becomes
    /// [`list_test_keys::Error::DefaultResponse`].
    pub async fn list_test_keys(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        service_name: &str,
    ) -> std::result::Result<models::TestKeys, list_test_keys::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/listTestKeys",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            service_name
        );
        let mut url = url::Url::parse(url_str).map_err(list_test_keys::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        // Token is only attached when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_test_keys::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list_test_keys::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_test_keys::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::TestKeys =
                    serde_json::from_slice(rsp_body).map_err(|source| list_test_keys::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-success: parse the CloudError body for diagnostics.
                let rsp_body = rsp.body();
                let rsp_value: models::CloudError =
                    serde_json::from_slice(rsp_body).map_err(|source| list_test_keys::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list_test_keys::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for [`list_test_keys`].
    pub mod list_test_keys {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Any HTTP status other than the documented success code(s).
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Regenerates a test key for an Azure Spring Cloud service.
    ///
    /// Sends `POST .../Spring/{service_name}/regenerateTestKey` with
    /// `regenerate_test_key_request` serialized as the JSON body. A 200
    /// response deserializes into [`models::TestKeys`]; anything else becomes
    /// [`regenerate_test_key::Error::DefaultResponse`].
    pub async fn regenerate_test_key(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        service_name: &str,
        regenerate_test_key_request: &models::RegenerateTestKeyRequestPayload,
    ) -> std::result::Result<models::TestKeys, regenerate_test_key::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/regenerateTestKey",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            service_name
        );
        let mut url = url::Url::parse(url_str).map_err(regenerate_test_key::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        // Token is only attached when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(regenerate_test_key::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(regenerate_test_key_request).map_err(regenerate_test_key::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(regenerate_test_key::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(regenerate_test_key::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::TestKeys = serde_json::from_slice(rsp_body)
                    .map_err(|source| regenerate_test_key::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-success: parse the CloudError body for diagnostics.
                let rsp_body = rsp.body();
                let rsp_value: models::CloudError = serde_json::from_slice(rsp_body)
                    .map_err(|source| regenerate_test_key::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(regenerate_test_key::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for [`regenerate_test_key`].
    pub mod regenerate_test_key {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Any HTTP status other than the documented success code(s).
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Disables the test endpoint for an Azure Spring Cloud service.
    ///
    /// Sends `POST .../Spring/{service_name}/disableTestEndpoint` with an
    /// empty body (`Content-Length: 0`). A 200 response yields `Ok(())` — the
    /// operation has no payload; anything else becomes
    /// [`disable_test_endpoint::Error::DefaultResponse`].
    pub async fn disable_test_endpoint(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        service_name: &str,
    ) -> std::result::Result<(), disable_test_endpoint::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/disableTestEndpoint",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            service_name
        );
        let mut url = url::Url::parse(url_str).map_err(disable_test_endpoint::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        // Token is only attached when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(disable_test_endpoint::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(disable_test_endpoint::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(disable_test_endpoint::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(()),
            status_code => {
                // Non-success: parse the CloudError body for diagnostics.
                let rsp_body = rsp.body();
                let rsp_value: models::CloudError = serde_json::from_slice(rsp_body)
                    .map_err(|source| disable_test_endpoint::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(disable_test_endpoint::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for [`disable_test_endpoint`].
    pub mod disable_test_endpoint {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Any HTTP status other than the documented success code(s).
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Enables the test endpoint for an Azure Spring Cloud service.
    ///
    /// Sends `POST .../Spring/{service_name}/enableTestEndpoint` with an
    /// empty body (`Content-Length: 0`). A 200 response deserializes into
    /// [`models::TestKeys`]; anything else becomes
    /// [`enable_test_endpoint::Error::DefaultResponse`].
    pub async fn enable_test_endpoint(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        service_name: &str,
    ) -> std::result::Result<models::TestKeys, enable_test_endpoint::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/enableTestEndpoint",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            service_name
        );
        let mut url = url::Url::parse(url_str).map_err(enable_test_endpoint::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        // Token is only attached when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(enable_test_endpoint::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(enable_test_endpoint::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(enable_test_endpoint::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::TestKeys = serde_json::from_slice(rsp_body)
                    .map_err(|source| enable_test_endpoint::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-success: parse the CloudError body for diagnostics.
                let rsp_body = rsp.body();
                let rsp_value: models::CloudError = serde_json::from_slice(rsp_body)
                    .map_err(|source| enable_test_endpoint::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(enable_test_endpoint::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for [`enable_test_endpoint`].
    pub mod enable_test_endpoint {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Any HTTP status other than the documented success code(s).
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Checks whether a service name is available in the given location.
    ///
    /// Sends `POST .../providers/Microsoft.AppPlatform/locations/{location}/checkNameAvailability`
    /// (subscription-scoped, no resource group) with `availability_parameters`
    /// serialized as the JSON body. A 200 response deserializes into
    /// [`models::NameAvailability`]; anything else becomes
    /// [`check_name_availability::Error::DefaultResponse`].
    pub async fn check_name_availability(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        location: &str,
        availability_parameters: &models::NameAvailabilityParameters,
    ) -> std::result::Result<models::NameAvailability, check_name_availability::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.AppPlatform/locations/{}/checkNameAvailability",
            operation_config.base_path(),
            subscription_id,
            location
        );
        let mut url = url::Url::parse(url_str).map_err(check_name_availability::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        // Token is only attached when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(check_name_availability::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(availability_parameters).map_err(check_name_availability::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(check_name_availability::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(check_name_availability::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::NameAvailability = serde_json::from_slice(rsp_body)
                    .map_err(|source| check_name_availability::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-success: parse the CloudError body for diagnostics.
                let rsp_body = rsp.body();
                let rsp_value: models::CloudError = serde_json::from_slice(rsp_body)
                    .map_err(|source| check_name_availability::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(check_name_availability::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for [`check_name_availability`].
    pub mod check_name_availability {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Any HTTP status other than the documented success code(s).
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Lists all Azure Spring Cloud services in a subscription.
    ///
    /// Sends `GET .../subscriptions/{subscription_id}/providers/Microsoft.AppPlatform/Spring`.
    /// A 200 response deserializes into [`models::ServiceResourceList`];
    /// anything else becomes [`list_by_subscription::Error::DefaultResponse`].
    /// Note: only the first page is returned here; paging via `next_link` (if
    /// any) is not handled by this function.
    pub async fn list_by_subscription(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
    ) -> std::result::Result<models::ServiceResourceList, list_by_subscription::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.AppPlatform/Spring",
            operation_config.base_path(),
            subscription_id
        );
        let mut url = url::Url::parse(url_str).map_err(list_by_subscription::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Token is only attached when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_by_subscription::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list_by_subscription::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_by_subscription::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::ServiceResourceList = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-success: parse the CloudError body for diagnostics.
                let rsp_body = rsp.body();
                let rsp_value: models::CloudError = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list_by_subscription::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for [`list_by_subscription`].
    pub mod list_by_subscription {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Any HTTP status other than the documented success code(s).
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Lists all Azure Spring Cloud services in a resource group.
    ///
    /// Sends `GET .../resourceGroups/{resource_group_name}/providers/Microsoft.AppPlatform/Spring`.
    /// A 200 response deserializes into [`models::ServiceResourceList`];
    /// anything else becomes [`list::Error::DefaultResponse`]. Only the first
    /// page is returned; paging via `next_link` (if any) is not handled here.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
    ) -> std::result::Result<models::ServiceResourceList, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring",
            operation_config.base_path(),
            subscription_id,
            resource_group_name
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Token is only attached when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::ServiceResourceList =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-success: parse the CloudError body for diagnostics.
                let rsp_body = rsp.body();
                let rsp_value: models::CloudError =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for [`list`].
    pub mod list {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Any HTTP status other than the documented success code(s).
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod apps {
use super::{models, API_VERSION};
    /// Fetches an App resource belonging to an Azure Spring Cloud service.
    ///
    /// Sends `GET .../Spring/{service_name}/apps/{app_name}`, optionally
    /// appending a `syncStatus` query parameter when `sync_status` is `Some`.
    /// A 200 response deserializes into [`models::AppResource`]; anything
    /// else becomes [`get::Error::DefaultResponse`].
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        service_name: &str,
        app_name: &str,
        sync_status: Option<&str>,
    ) -> std::result::Result<models::AppResource, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            service_name,
            app_name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Token is only attached when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()))
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        // Optional query parameter: omitted entirely when None.
        if let Some(sync_status) = sync_status {
            url.query_pairs_mut().append_pair("syncStatus", sync_status);
        }
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::AppResource =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-success: parse the CloudError body for diagnostics.
                let rsp_body = rsp.body();
                let rsp_value: models::CloudError =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(get::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for [`get`].
    pub mod get {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Any HTTP status other than the documented success code(s).
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
app_name: &str,
app_resource: &models::AppResource,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
app_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(app_resource).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::AppResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: models::AppResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::AppResource),
Created201(models::AppResource),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
app_name: &str,
app_resource: &models::AppResource,
) -> std::result::Result<update::Response, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
app_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(app_resource).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::AppResource =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: models::AppResource =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update::Response::Accepted202(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod update {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::AppResource),
Accepted202(models::AppResource),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
app_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
app_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
http::StatusCode::OK => Ok(delete::Response::Ok200),
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod delete {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
NoContent204,
Ok200,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
) -> std::result::Result<models::AppResourceCollection, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::AppResourceCollection =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get_resource_upload_url(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
app_name: &str,
) -> std::result::Result<models::ResourceUploadDefinition, get_resource_upload_url::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/getResourceUploadUrl",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
app_name
);
let mut url = url::Url::parse(url_str).map_err(get_resource_upload_url::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_resource_upload_url::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(get_resource_upload_url::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_resource_upload_url::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ResourceUploadDefinition = serde_json::from_slice(rsp_body)
.map_err(|source| get_resource_upload_url::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError = serde_json::from_slice(rsp_body)
.map_err(|source| get_resource_upload_url::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get_resource_upload_url::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get_resource_upload_url {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
/// Operations on the bindings of an app under a Microsoft.AppPlatform "Spring"
/// service: get / create_or_update / update / delete / list.
pub mod bindings {
    use super::{models, API_VERSION};
    // NOTE(review): the per-operation error modules below re-import `API_VERSION`
    // but never use it (the functions reference `super::API_VERSION` directly) —
    // harmless, but it triggers unused_imports warnings.
    /// GET a single binding of an app; returns the decoded `BindingResource`.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        service_name: &str,
        app_name: &str,
        binding_name: &str,
    ) -> std::result::Result<models::BindingResource, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/bindings/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            service_name,
            app_name,
            binding_name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // A bearer token is attached only when the configuration carries a credential.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::BindingResource =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            // Any other status is surfaced with the decoded CloudError body.
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::CloudError =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(get::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for the `get` operation.
    pub mod get {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// PUT a `BindingResource`; only a 200 response is treated as success.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        service_name: &str,
        app_name: &str,
        binding_name: &str,
        binding_resource: &models::BindingResource,
    ) -> std::result::Result<models::BindingResource, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/bindings/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            service_name,
            app_name,
            binding_name
        );
        let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        // A bearer token is attached only when the configuration carries a credential.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create_or_update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(binding_resource).map_err(create_or_update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(create_or_update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::BindingResource = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            // Any other status is surfaced with the decoded CloudError body.
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::CloudError = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(create_or_update::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for the `create_or_update` operation.
    pub mod create_or_update {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// PATCH a `BindingResource`; only a 200 response is treated as success.
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        service_name: &str,
        app_name: &str,
        binding_name: &str,
        binding_resource: &models::BindingResource,
    ) -> std::result::Result<models::BindingResource, update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/bindings/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            service_name,
            app_name,
            binding_name
        );
        let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PATCH);
        // A bearer token is attached only when the configuration carries a credential.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(binding_resource).map_err(update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::BindingResource =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            // Any other status is surfaced with the decoded CloudError body.
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::CloudError =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(update::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for the `update` operation.
    pub mod update {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// DELETE a binding; 200 and 204 are both treated as success.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        service_name: &str,
        app_name: &str,
        binding_name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/bindings/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            service_name,
            app_name,
            binding_name
        );
        let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        // A bearer token is attached only when the configuration carries a credential.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(delete::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            // Any other status is surfaced with the decoded CloudError body.
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::CloudError =
                    serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(delete::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Response and error types for the `delete` operation.
    pub mod delete {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            NoContent204,
            Ok200,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// GET the collection of bindings of an app.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        service_name: &str,
        app_name: &str,
    ) -> std::result::Result<models::BindingResourceCollection, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/bindings",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            service_name,
            app_name
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // A bearer token is attached only when the configuration carries a credential.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::BindingResourceCollection =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            // Any other status is surfaced with the decoded CloudError body.
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::CloudError =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for the `list` operation.
    pub mod list {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod certificates {
use super::{models, API_VERSION};
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
certificate_name: &str,
) -> std::result::Result<models::CertificateResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/certificates/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
certificate_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::CertificateResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
certificate_name: &str,
certificate_resource: &models::CertificateResource,
) -> std::result::Result<models::CertificateResource, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/certificates/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
certificate_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(certificate_resource).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::CertificateResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod create_or_update {
        use super::{models, API_VERSION};
        /// Errors that can occur while sending the `create_or_update`
        /// request or handling its response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with a non-success HTTP status;
            /// `value` holds the deserialized error payload.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            /// The request URL could not be parsed.
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            /// The HTTP request could not be constructed.
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            /// The HTTP client failed while sending the request.
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            /// The request body could not be serialized to JSON.
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            /// The response body could not be deserialized; the raw
            /// bytes are kept for diagnostics.
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            /// Acquiring an access token from the credential failed.
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
certificate_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/certificates/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
certificate_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod delete {
        use super::{models, API_VERSION};
        /// Success outcomes of the `delete` operation.
        #[derive(Debug)]
        pub enum Response {
            /// HTTP 200: the resource was deleted.
            Ok200,
            /// HTTP 204: there was no resource to delete.
            NoContent204,
        }
        /// Errors that can occur while sending the `delete` request or
        /// handling its response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with a non-success HTTP status;
            /// `value` holds the deserialized error payload.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            /// The request URL could not be parsed.
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            /// The HTTP request could not be constructed.
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            /// The HTTP client failed while sending the request.
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            /// The request body could not be serialized to JSON.
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            /// The response body could not be deserialized; the raw
            /// bytes are kept for diagnostics.
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            /// Acquiring an access token from the credential failed.
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
) -> std::result::Result<models::CertificateResourceCollection, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/certificates",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::CertificateResourceCollection =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod list {
        use super::{models, API_VERSION};
        /// Errors that can occur while sending the `list` request or
        /// handling its response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with a non-success HTTP status;
            /// `value` holds the deserialized error payload.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            /// The request URL could not be parsed.
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            /// The HTTP request could not be constructed.
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            /// The HTTP client failed while sending the request.
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            /// The request body could not be serialized to JSON.
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            /// The response body could not be deserialized; the raw
            /// bytes are kept for diagnostics.
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            /// Acquiring an access token from the credential failed.
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod custom_domains {
use super::{models, API_VERSION};
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
app_name: &str,
domain_name: &str,
) -> std::result::Result<models::CustomDomainResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/domains/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
app_name,
domain_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::CustomDomainResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod get {
        use super::{models, API_VERSION};
        /// Errors that can occur while sending the `get` request or
        /// handling its response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with a non-success HTTP status;
            /// `value` holds the deserialized error payload.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            /// The request URL could not be parsed.
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            /// The HTTP request could not be constructed.
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            /// The HTTP client failed while sending the request.
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            /// The request body could not be serialized to JSON.
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            /// The response body could not be deserialized; the raw
            /// bytes are kept for diagnostics.
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            /// Acquiring an access token from the credential failed.
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
app_name: &str,
domain_name: &str,
domain_resource: &models::CustomDomainResource,
) -> std::result::Result<models::CustomDomainResource, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/domains/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
app_name,
domain_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(domain_resource).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::CustomDomainResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod create_or_update {
        use super::{models, API_VERSION};
        /// Errors that can occur while sending the `create_or_update`
        /// request or handling its response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with a non-success HTTP status;
            /// `value` holds the deserialized error payload.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            /// The request URL could not be parsed.
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            /// The HTTP request could not be constructed.
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            /// The HTTP client failed while sending the request.
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            /// The request body could not be serialized to JSON.
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            /// The response body could not be deserialized; the raw
            /// bytes are kept for diagnostics.
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            /// Acquiring an access token from the credential failed.
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn patch(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
app_name: &str,
domain_name: &str,
domain_resource: &models::CustomDomainResource,
) -> std::result::Result<models::CustomDomainResource, patch::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/domains/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
app_name,
domain_name
);
let mut url = url::Url::parse(url_str).map_err(patch::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(patch::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(domain_resource).map_err(patch::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(patch::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(patch::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::CustomDomainResource =
serde_json::from_slice(rsp_body).map_err(|source| patch::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError =
serde_json::from_slice(rsp_body).map_err(|source| patch::Error::DeserializeError(source, rsp_body.clone()))?;
Err(patch::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod patch {
        use super::{models, API_VERSION};
        /// Errors that can occur while sending the `patch` request or
        /// handling its response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with a non-success HTTP status;
            /// `value` holds the deserialized error payload.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            /// The request URL could not be parsed.
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            /// The HTTP request could not be constructed.
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            /// The HTTP client failed while sending the request.
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            /// The request body could not be serialized to JSON.
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            /// The response body could not be deserialized; the raw
            /// bytes are kept for diagnostics.
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            /// Acquiring an access token from the credential failed.
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
app_name: &str,
domain_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/domains/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
app_name,
domain_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod delete {
        use super::{models, API_VERSION};
        /// Success outcomes of the `delete` operation.
        #[derive(Debug)]
        pub enum Response {
            /// HTTP 200: the binding was deleted.
            Ok200,
            /// HTTP 204: there was no binding to delete.
            NoContent204,
        }
        /// Errors that can occur while sending the `delete` request or
        /// handling its response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with a non-success HTTP status;
            /// `value` holds the deserialized error payload.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            /// The request URL could not be parsed.
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            /// The HTTP request could not be constructed.
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            /// The HTTP client failed while sending the request.
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            /// The request body could not be serialized to JSON.
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            /// The response body could not be deserialized; the raw
            /// bytes are kept for diagnostics.
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            /// Acquiring an access token from the credential failed.
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
app_name: &str,
) -> std::result::Result<models::CustomDomainResourceCollection, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/domains",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
app_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::CustomDomainResourceCollection =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod list {
        use super::{models, API_VERSION};
        /// Errors that can occur while sending the `list` request or
        /// handling its response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with a non-success HTTP status;
            /// `value` holds the deserialized error payload.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            /// The request URL could not be parsed.
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            /// The HTTP request could not be constructed.
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            /// The HTTP client failed while sending the request.
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            /// The request body could not be serialized to JSON.
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            /// The response body could not be deserialized; the raw
            /// bytes are kept for diagnostics.
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            /// Acquiring an access token from the credential failed.
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn validate(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
app_name: &str,
validate_payload: &models::CustomDomainValidatePayload,
) -> std::result::Result<models::CustomDomainValidateResult, validate::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/domains/validate",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
app_name
);
let mut url = url::Url::parse(url_str).map_err(validate::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(validate::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(validate_payload).map_err(validate::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(validate::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(validate::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::CustomDomainValidateResult =
serde_json::from_slice(rsp_body).map_err(|source| validate::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError =
serde_json::from_slice(rsp_body).map_err(|source| validate::Error::DeserializeError(source, rsp_body.clone()))?;
Err(validate::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod validate {
        use super::{models, API_VERSION};
        /// Errors that can occur while sending the `validate` request or
        /// handling its response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with a non-success HTTP status;
            /// `value` holds the deserialized error payload.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            /// The request URL could not be parsed.
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            /// The HTTP request could not be constructed.
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            /// The HTTP client failed while sending the request.
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            /// The request body could not be serialized to JSON.
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            /// The response body could not be deserialized; the raw
            /// bytes are kept for diagnostics.
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            /// Acquiring an access token from the credential failed.
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod deployments {
use super::{models, API_VERSION};
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
app_name: &str,
deployment_name: &str,
) -> std::result::Result<models::DeploymentResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/deployments/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
app_name,
deployment_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::DeploymentResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod get {
        use super::{models, API_VERSION};
        /// Errors that can occur while sending the `get` request or
        /// handling its response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with a non-success HTTP status;
            /// `value` holds the deserialized error payload.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            /// The request URL could not be parsed.
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            /// The HTTP request could not be constructed.
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            /// The HTTP client failed while sending the request.
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            /// The request body could not be serialized to JSON.
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            /// The response body could not be deserialized; the raw
            /// bytes are kept for diagnostics.
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            /// Acquiring an access token from the credential failed.
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
app_name: &str,
deployment_name: &str,
deployment_resource: &models::DeploymentResource,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/deployments/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
app_name,
deployment_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(deployment_resource).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: models::DeploymentResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: models::DeploymentResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Accepted202(rsp_value))
}
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::DeploymentResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod create_or_update {
        use super::{models, API_VERSION};
        /// Success outcomes of the `create_or_update` operation; each
        /// variant carries the deployment returned by the service.
        #[derive(Debug)]
        pub enum Response {
            /// HTTP 201: the deployment was created.
            Created201(models::DeploymentResource),
            /// HTTP 202: the operation was accepted and continues asynchronously.
            Accepted202(models::DeploymentResource),
            /// HTTP 200: the deployment was updated.
            Ok200(models::DeploymentResource),
        }
        /// Errors that can occur while sending the `create_or_update`
        /// request or handling its response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with a non-success HTTP status;
            /// `value` holds the deserialized error payload.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            /// The request URL could not be parsed.
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            /// The HTTP request could not be constructed.
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            /// The HTTP client failed while sending the request.
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            /// The request body could not be serialized to JSON.
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            /// The response body could not be deserialized; the raw
            /// bytes are kept for diagnostics.
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            /// Acquiring an access token from the credential failed.
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
app_name: &str,
deployment_name: &str,
deployment_resource: &models::DeploymentResource,
) -> std::result::Result<update::Response, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/deployments/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
app_name,
deployment_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(deployment_resource).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: models::DeploymentResource =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update::Response::Accepted202(rsp_value))
}
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::DeploymentResource =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update::Response::Ok200(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod update {
        use super::{models, API_VERSION};
        /// Success outcomes of the `update` operation; each variant
        /// carries the deployment returned by the service.
        #[derive(Debug)]
        pub enum Response {
            /// HTTP 202: the operation was accepted and continues asynchronously.
            Accepted202(models::DeploymentResource),
            /// HTTP 200: the deployment was updated.
            Ok200(models::DeploymentResource),
        }
        /// Errors that can occur while sending the `update` request or
        /// handling its response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// The service answered with a non-success HTTP status;
            /// `value` holds the deserialized error payload.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            /// The request URL could not be parsed.
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            /// The HTTP request could not be constructed.
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            /// The HTTP client failed while sending the request.
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            /// The request body could not be serialized to JSON.
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            /// The response body could not be deserialized; the raw
            /// bytes are kept for diagnostics.
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            /// Acquiring an access token from the credential failed.
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
app_name: &str,
deployment_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/deployments/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
app_name,
deployment_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod delete {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Lists the deployments of an app, optionally filtered by one or more
/// `version` query parameters.
///
/// # Errors
/// Returns `list::Error::DefaultResponse` with the service's `CloudError`
/// payload for any non-200 status, and the transport/serialization variants
/// for local failures.
pub async fn list(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    service_name: &str,
    app_name: &str,
    version: &[&str],
) -> std::result::Result<models::DeploymentResourceCollection, list::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/deployments",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        service_name,
        app_name
    );
    let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::GET);
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(list::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
    // `append_pair` takes `&str`, so iterate by value; the previous
    // `value.to_string().as_str()` allocated a throwaway String per entry.
    for value in version.iter().copied() {
        url.query_pairs_mut().append_pair("version", value);
    }
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
    let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: models::DeploymentResourceCollection =
                serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        status_code => {
            // Non-200 responses carry a CloudError body describing the failure.
            let rsp_body = rsp.body();
            let rsp_value: models::CloudError =
                serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(list::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
/// Response and error types for the deployment list operation.
pub mod list {
    use super::{models, API_VERSION};
    /// Failure modes of the list call.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// Lists every deployment in the whole service cluster (across all apps),
/// optionally filtered by one or more `version` query parameters.
///
/// # Errors
/// Returns `list_cluster_all_deployments::Error::DefaultResponse` with the
/// service's `CloudError` payload for any non-200 status, and the
/// transport/serialization variants for local failures.
pub async fn list_cluster_all_deployments(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    service_name: &str,
    version: &[&str],
) -> std::result::Result<models::DeploymentResourceCollection, list_cluster_all_deployments::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/deployments",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        service_name
    );
    let mut url = url::Url::parse(url_str).map_err(list_cluster_all_deployments::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::GET);
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(list_cluster_all_deployments::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
    // `append_pair` takes `&str`, so iterate by value; the previous
    // `value.to_string().as_str()` allocated a throwaway String per entry.
    for value in version.iter().copied() {
        url.query_pairs_mut().append_pair("version", value);
    }
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder
        .body(req_body)
        .map_err(list_cluster_all_deployments::Error::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .map_err(list_cluster_all_deployments::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: models::DeploymentResourceCollection = serde_json::from_slice(rsp_body)
                .map_err(|source| list_cluster_all_deployments::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        status_code => {
            // Non-200 responses carry a CloudError body describing the failure.
            let rsp_body = rsp.body();
            let rsp_value: models::CloudError = serde_json::from_slice(rsp_body)
                .map_err(|source| list_cluster_all_deployments::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(list_cluster_all_deployments::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
/// Response and error types for the cluster-wide deployment list operation.
pub mod list_cluster_all_deployments {
    use super::{models, API_VERSION};
    /// Failure modes of the cluster-wide list call.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
pub async fn start(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
app_name: &str,
deployment_name: &str,
) -> std::result::Result<start::Response, start::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/deployments/{}/start",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
app_name,
deployment_name
);
let mut url = url::Url::parse(url_str).map_err(start::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(start::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(start::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(start::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(start::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(start::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError =
serde_json::from_slice(rsp_body).map_err(|source| start::Error::DeserializeError(source, rsp_body.clone()))?;
Err(start::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod start {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn stop(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
app_name: &str,
deployment_name: &str,
) -> std::result::Result<stop::Response, stop::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/deployments/{}/stop",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
app_name,
deployment_name
);
let mut url = url::Url::parse(url_str).map_err(stop::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(stop::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(stop::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(stop::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(stop::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(stop::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError =
serde_json::from_slice(rsp_body).map_err(|source| stop::Error::DeserializeError(source, rsp_body.clone()))?;
Err(stop::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod stop {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn restart(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
app_name: &str,
deployment_name: &str,
) -> std::result::Result<restart::Response, restart::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/deployments/{}/restart",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
app_name,
deployment_name
);
let mut url = url::Url::parse(url_str).map_err(restart::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(restart::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(restart::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(restart::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(restart::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(restart::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError =
serde_json::from_slice(rsp_body).map_err(|source| restart::Error::DeserializeError(source, rsp_body.clone()))?;
Err(restart::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod restart {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get_log_file_url(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
service_name: &str,
app_name: &str,
deployment_name: &str,
) -> std::result::Result<get_log_file_url::Response, get_log_file_url::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AppPlatform/Spring/{}/apps/{}/deployments/{}/getLogFileUrl",
operation_config.base_path(),
subscription_id,
resource_group_name,
service_name,
app_name,
deployment_name
);
let mut url = url::Url::parse(url_str).map_err(get_log_file_url::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_log_file_url::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get_log_file_url::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_log_file_url::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::LogFileUrlResponse = serde_json::from_slice(rsp_body)
.map_err(|source| get_log_file_url::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(get_log_file_url::Response::Ok200(rsp_value))
}
http::StatusCode::NO_CONTENT => Ok(get_log_file_url::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError = serde_json::from_slice(rsp_body)
.map_err(|source| get_log_file_url::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get_log_file_url::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get_log_file_url {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::LogFileUrlResponse),
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod operations {
use super::{models, API_VERSION};
pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<models::AvailableOperations, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/providers/Microsoft.AppPlatform/operations", operation_config.base_path(),);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::AvailableOperations =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod runtime_versions {
use super::{models, API_VERSION};
pub async fn list_runtime_versions(
operation_config: &crate::OperationConfig,
) -> std::result::Result<models::AvailableRuntimeVersions, list_runtime_versions::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/providers/Microsoft.AppPlatform/runtimeVersions", operation_config.base_path(),);
let mut url = url::Url::parse(url_str).map_err(list_runtime_versions::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_runtime_versions::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_runtime_versions::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_runtime_versions::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::AvailableRuntimeVersions = serde_json::from_slice(rsp_body)
.map_err(|source| list_runtime_versions::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError = serde_json::from_slice(rsp_body)
.map_err(|source| list_runtime_versions::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_runtime_versions::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_runtime_versions {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sku {
use super::{models, API_VERSION};
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<models::ResourceSkuCollection, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.AppPlatform/skus",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ResourceSkuCollection =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::CloudError =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
| 47.386795 | 138 | 0.58544 |
acfbc400ad9fe45ee9a74f4f9ee78a1f4c460ccc | 5,572 | use gumdrop::Options;
use serde::Deserialize;
use simple_error::*;
use std::convert::TryFrom;
use tendermint::{
block,
signature::{self, Signature, Signer, ED25519_SIGNATURE_SIZE},
vote, Time,
};
use crate::{helpers::*, Generator, Header, Validator};
/// Declarative description of a single vote, parsed from CLI options or a
/// serialized form, from which a concrete `tendermint::vote::Vote` is produced.
///
/// All fields are optional so a partially specified `Vote` can be merged with
/// defaults via `Generator::merge_with_default`.
#[derive(Debug, Options, Deserialize, Clone)]
pub struct Vote {
    /// Validator that signs this vote; required (may also arrive via STDIN).
    #[options(
        help = "validator of this vote (required; can be passed via STDIN)",
        parse(try_from_str = "parse_as::<Validator>")
    )]
    pub validator: Option<Validator>,
    /// Index of the validator in the header's validator set; computed from the
    /// commit header when absent.
    #[options(help = "validator index (default: from commit header)")]
    pub index: Option<u64>,
    /// Header whose hash the vote commits to.
    #[options(help = "header to sign (default: commit header)")]
    pub header: Option<Header>,
    /// `Some(())` marks the vote as a prevote; `None` means precommit.
    #[options(help = "vote type; 'prevote' if set, otherwise 'precommit' (default)")]
    pub prevote: Option<()>,
    /// Block height; falls back to the header's height when absent.
    #[options(help = "block height (default: from header)")]
    pub height: Option<u64>,
    /// Vote timestamp; falls back to the header's time when absent.
    #[options(help = "time (default: from header)")]
    pub time: Option<Time>,
    /// Consensus round of the vote.
    #[options(help = "commit round (default: from commit)")]
    pub round: Option<u64>,
}
impl Vote {
    /// Creates a vote description for `validator` over `header`,
    /// leaving every other field unset (`None`).
    pub fn new(validator: Validator, header: Header) -> Self {
        Vote {
            validator: Some(validator),
            index: None,
            header: Some(header),
            prevote: None,
            height: None,
            time: None,
            round: None,
        }
    }
    // Builder-style setters generated by the `set_option!` helper macro.
    set_option!(index, u64);
    set_option!(header, Header);
    // `prevote` is stored as Option<()>: Some(()) selects a prevote, None a precommit.
    set_option!(prevote, bool, if prevote { Some(()) } else { None });
    set_option!(height, u64);
    set_option!(time, Time);
    set_option!(round, u64);
}
impl std::str::FromStr for Vote {
    type Err = SimpleError;

    /// Parses a `Vote` description from its serialized string form,
    /// delegating to the shared `parse_as` helper.
    fn from_str(input: &str) -> Result<Self, Self::Err> {
        parse_as::<Self>(input)
    }
}
impl Generator<vote::Vote> for Vote {
    /// Fills every unset (`None`) field of `self` from `default`.
    fn merge_with_default(self, default: Self) -> Self {
        Vote {
            validator: self.validator.or(default.validator),
            index: self.index.or(default.index),
            header: self.header.or(default.header),
            prevote: self.prevote.or(default.prevote),
            height: self.height.or(default.height),
            time: self.time.or(default.time),
            round: self.round.or(default.round),
        }
    }
    /// Produces a signed `tendermint::vote::Vote` from this description.
    ///
    /// # Errors
    /// Fails when the validator or header is missing, when key material cannot
    /// be derived, or when no index is given and the validator cannot be
    /// located in the header's validator set.
    fn generate(&self) -> Result<vote::Vote, SimpleError> {
        let validator = match &self.validator {
            None => bail!("failed to generate vote: validator is missing"),
            Some(v) => v,
        };
        let header = match &self.header {
            None => bail!("failed to generate vote: header is missing"),
            Some(h) => h,
        };
        let signer = validator.get_private_key()?;
        let block_validator = validator.generate()?;
        let block_header = header.generate()?;
        let block_id = block::Id::new(block_header.hash(), None);
        let validator_index = match self.index {
            Some(i) => i,
            None => {
                // Fix: this previously unwrapped `header.validators`, panicking on a
                // header without a validator set; report a proper error instead,
                // consistent with the other failure paths of this function.
                let validators = match header.validators.as_ref() {
                    Some(vals) => vals,
                    None => bail!("failed to generate vote: no index given and header has no validator set"),
                };
                match validators.iter().position(|v| *v == *validator) {
                    Some(i) => i as u64,
                    None => bail!("failed to generate vote: no index given and validator not present in the header"),
                }
            }
        };
        let mut vote = vote::Vote {
            vote_type: if self.prevote.is_some() {
                vote::Type::Prevote
            } else {
                vote::Type::Precommit
            },
            height: block_header.height,
            round: self.round.unwrap_or(1),
            block_id: Some(block_id),
            timestamp: block_header.time,
            validator_address: block_validator.address,
            validator_index,
            // Placeholder all-zero signature; replaced below once sign bytes are known.
            signature: Signature::Ed25519(try_with!(
                signature::Ed25519::try_from(&[0_u8; ED25519_SIGNATURE_SIZE][..]),
                "failed to construct empty ed25519 signature"
            )),
        };
        let sign_bytes = get_vote_sign_bytes(block_header.chain_id.as_str(), &vote);
        vote.signature = signer.sign(sign_bytes.as_slice()).into();
        Ok(vote)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// End-to-end check: generating a vote from a description yields the
    /// expected fields, and the signature verifies only for the signer's key.
    #[test]
    fn test_vote() {
        let valset1 = [
            Validator::new("a"),
            Validator::new("b"),
            Validator::new("c"),
        ];
        let valset2 = [
            Validator::new("b"),
            Validator::new("c"),
            Validator::new("d"),
        ];
        let now = Time::now();
        let header = Header::new(&valset1)
            .next_validators(&valset2)
            .height(10)
            .time(now);
        // Vote cast by the second validator of valset1, at round 2.
        let val = &valset1[1];
        let vote = Vote::new(val.clone(), header.clone()).round(2);
        let block_val = val.generate().unwrap();
        let block_header = header.generate().unwrap();
        let block_vote = vote.generate().unwrap();
        assert_eq!(block_vote.validator_address, block_val.address);
        assert_eq!(block_vote.height, block_header.height);
        assert_eq!(block_vote.round, 2);
        assert_eq!(block_vote.timestamp, now);
        // Index is inferred from the validator's position in the header set.
        assert_eq!(block_vote.validator_index, 1);
        // No prevote flag was set, so a precommit is produced.
        assert_eq!(block_vote.vote_type, vote::Type::Precommit);
        let sign_bytes = get_vote_sign_bytes(block_header.chain_id.as_str(), &block_vote);
        // A different validator's key must NOT verify the signature...
        assert!(!verify_signature(
            &valset1[0].get_public_key().unwrap(),
            &sign_bytes,
            &block_vote.signature
        ));
        // ...while the actual signer's key must.
        assert!(verify_signature(
            &valset1[1].get_public_key().unwrap(),
            &sign_bytes,
            &block_vote.signature
        ));
    }
}
| 33.365269 | 112 | 0.566583 |
f4ea1bafb67d8963af3829b79852f72518d6ce47 | 1,159 | use std::fs::File;
use std::io::prelude::*;
use std::io::{self};
/// Advent of Code 2020, day 13 part 1: find the bus (from a fixed
/// schedule) that departs soonest at or after the input timestamp and
/// print `wait_time * bus_id`.
fn main() -> io::Result<()> {
    let mut f = File::open("src/day13/input_day13.txt")?;
    let mut s = String::new();
    f.read_to_string(&mut s)?;
    let (timestamp, bus_times) = parse(&s);
    let (best_bus, wait_time) = earliest_departure(timestamp, &bus_times);
    println!("Part 1: {}", wait_time * best_bus);
    Ok(())
}

/// Returns `(bus_id, wait_time)` for the bus departing soonest at or
/// after `timestamp`. Each bus departs at every multiple of its id, so
/// the wait is `bus - timestamp % bus`, or 0 when `timestamp` is itself
/// a departure time.
///
/// The original version seeded the running minimum with `timestamp` and
/// `best_bus = 0`; for small timestamps no bus could beat the seed and
/// the program printed 0. Taking a genuine minimum over the list fixes
/// that edge case.
fn earliest_departure(timestamp: u32, bus_times: &[u32]) -> (u32, u32) {
    bus_times
        .iter()
        .map(|&bus| (bus, (bus - timestamp % bus) % bus))
        // `min_by_key` keeps the first minimum, matching the original
        // strict-`<` update rule on ties.
        .min_by_key(|&(_, wait)| wait)
        .expect("bus list is empty")
}
/// Parses the puzzle input: the first line is the earliest departure
/// timestamp, the last line is a comma-separated bus list in which `x`
/// marks an out-of-service bus and is skipped.
///
/// Takes `&str` instead of `&String` (the idiomatic borrow); existing
/// call sites passing `&String` still compile via deref coercion.
///
/// # Panics
/// Panics if the input is empty or a numeric field fails to parse,
/// matching the original `unwrap` behavior.
fn parse(s: &str) -> (u32, Vec<u32>) {
    let lines: Vec<&str> = s.lines().collect();
    let timestamp: u32 = lines
        .first()
        .expect("input is empty")
        .parse()
        .expect("first line is not a number");
    // Keep the original first/last-line semantics rather than assuming
    // exactly two lines of input.
    let bus_times = lines
        .last()
        .expect("input is empty")
        .split(',')
        .filter(|&id| id != "x")
        .map(|id| id.parse().expect("bus id is not a number"))
        .collect();
    (timestamp, bus_times)
}
50843fc06f3c5222cec7831dd59228b2990d0882 | 1,507 | //! # NationStates API in Rust
//!
//! This crate consists of a wrapper around the api of the game [NationStates](https://www.nationstates.net).
//! Still a work in progress; currently it only supports the standard APIs for the nation and region endpoints.
//! The NationStates API documentation is available [here](https://www.nationstates.net/pages/api.html).
//!
//! ## Example
//! ```
//! use nationstates::{NSClient, NSError};
//!
//! #[tokio::main]
//! pub async fn main() -> Result<(), NSError> {
//! let mut client = NSClient::new("Owl Archipelago's API Test")?;
//! let nation = client.get_nation("Owl Archipelago").await?;
//!
//! println!("{}", nation.name);
//! println!("{}", nation.fullname);
//! println!("{}", nation.motto);
//!
//! if nation.in_wa() {
//! println!("WA Member");
//! }
//!
//! println!("{} - {}", nation.founded, nation.firstlogin.to_string());
//!
//! println!();
//! println!("Economic Freedom: {}", nation.freedom.economy);
//! println!();
//!
//! for cause in nation.deaths.causes {
//! println!("{} - {}", cause.reason, cause.percentage);
//! }
//!
//! println!();
//! println!("Endo Count: {}", nation.endorsements.count());
//! for endo in nation.endorsements.get() {
//! println!("{}", endo);
//! }
//!
//! Ok(())
//! }
//! ```
pub mod client;
pub mod errors;
pub mod nation;
pub mod region;
pub use client::NSClient;
pub use errors::NSError;
pub use nation::Nation;
pub use region::Region;
| 28.433962 | 109 | 0.591241 |
ef3e9b75c5456af1b6bee43a73a99396614368e1 | 1,266 | //! winping - Easy ICMP Echo for Windows, and no elevated rights required!
//!
//! Super basic ping.exe example
//!
//! ```rust
//! use std::net::IpAddr;
//! use winping::{Buffer, Pinger};
//!
//! fn main() {
//! let dst = std::env::args()
//! .nth(1)
//! .unwrap_or(String::from("127.0.0.1"))
//! .parse::<IpAddr>()
//! .expect("Could not parse IP Address");
//!
//! let pinger = Pinger::new().unwrap();
//! let mut buffer = Buffer::new();
//!
//! for _ in 0..4 {
//! match pinger.send(dst, &mut buffer) {
//! Ok(rtt) => println!("Response time {} ms.", rtt),
//! Err(err) => println!("{}.", err),
//! }
//! }
//! }
//! ```
//!
#![cfg(any(target_os = "windows", doc))]
#![forbid(unreachable_patterns)]
#![allow(clippy::needless_doctest_main)]
#[cfg(feature = "async")]
mod async_pinger;
mod buffer;
mod error;
mod pinger;
pub(crate) mod util;
#[cfg(feature = "async")]
pub use async_pinger::{set_async_buffer_size, AsyncPinger, AsyncResult, PingFuture};
pub use buffer::Buffer;
pub use error::Error;
pub use pinger::{CreateError, IpPair, Pinger};
#[cfg(test)]
mod tests;
#[cfg(all(test, any(feature = "real-tests-v4", feature = "real-tests-v6")))]
mod real_tests;
| 25.32 | 84 | 0.5703 |
219b8f6387507fbc4ccfd44b761d3a2b9825ace2 | 7,242 | use super::ctxt::Ctxt;
use super::symbol::*;
use proc_macro2::{Group, Span, TokenStream, TokenTree};
use quote::ToTokens;
use syn::{
parse,
parse::Parse,
punctuated::Punctuated,
token::Comma,
Data, DeriveInput, Fields,
Meta::{List, NameValue},
NestedMeta::{Lit, Meta},
};
/// A single named attribute slot collected while parsing `#[identifier(...)]`.
///
/// Remembers the tokens of the occurrence that set it so duplicate
/// occurrences can be reported with an accurate span.
struct Attr<'c, T> {
    // Shared error-collection context; errors are accumulated, not returned.
    cx: &'c Ctxt,
    // Attribute name, used in diagnostic messages.
    name: Symbol,
    // Tokens of the occurrence that set `value` (used later for error spans).
    tokens: TokenStream,
    // The parsed value, `None` until the attribute is seen.
    value: Option<T>,
}
impl<'c, T> Attr<'c, T> {
    /// Creates an empty slot named `name` that reports through `cx`.
    fn none(cx: &'c Ctxt, name: Symbol) -> Self {
        Self {
            cx,
            name,
            tokens: TokenStream::new(),
            value: None,
        }
    }

    /// Records `value`, keeping the tokens of `obj` for later error spans.
    /// If a value was already recorded, a duplicate-attribute error is
    /// reported and the first value is kept.
    fn set<A: ToTokens>(&mut self, obj: A, value: T) {
        let tokens = obj.into_token_stream();
        match self.value {
            Some(_) => self.cx.error_spanned_by(
                tokens,
                format!("duplicate identifier attribute `{}`", self.name),
            ),
            None => {
                self.tokens = tokens;
                self.value = Some(value);
            }
        }
    }

    /// Consumes the slot, yielding the recorded value if any.
    fn get(self) -> Option<T> {
        self.value
    }

    /// Whether no value has been recorded yet.
    fn is_none(&self) -> bool {
        self.value.is_none()
    }
}
/// Parsed contents of the `#[identifier(...)]` derive attribute.
pub struct Attrs {
    // `with = "mod"` — presumably a path to the implementation the derive
    // delegates to; verify against the expansion code.
    with: Option<syn::ExprPath>,
    // `params = "a, b"` — optional comma-separated extra expressions.
    params: Option<Punctuated<syn::Expr, Comma>>,
}
// Error text reused wherever the `#[identifier(...)]` shape is wrong.
const ERR_EXPECT_IDENTIFIER: &str = "expected #[identifier(with = \"mod\", ...)";
impl Attrs {
    /// Extracts and validates the `#[identifier(...)]` attribute from a
    /// derive input.
    ///
    /// Never fails directly: all problems (wrong data shape, missing or
    /// malformed attribute, unknown keys, missing `with`) are reported
    /// through `cx` and a best-effort `Attrs` is still returned.
    pub fn get(cx: &Ctxt, input: &syn::DeriveInput) -> Attrs {
        let mut params = Attr::none(cx, PARAMS);
        let mut with = Attr::none(cx, WITH);
        // Validate the target is `struct Id(u128);` before touching attributes.
        check_data(&cx, input);
        // Ok(&Attribute) if `#[identifier(...)]` is present; on absence an
        // error is recorded in `cx` and the Err(()) carries no payload.
        let identifier_result = input
            .attrs
            .iter()
            .find(|attr| attr.path == IDENTIFIER)
            .ok_or_else(|| {
                cx.error_spanned_by(&input, ERR_EXPECT_IDENTIFIER);
            });
        // With no attribute there is nothing to iterate over below.
        let meta_items =
            identifier_result.map_or(Vec::new(), |identifier| get_meta_items(cx, identifier));
        for meta_item in meta_items {
            match &meta_item {
                // Parse `#[identifier(with = "expr_path")]`
                Meta(NameValue(m)) if m.path == WITH => {
                    if let Ok(w) = parse_lit_into_expr_path(cx, WITH, &m.lit) {
                        with.set(&m.path, w);
                    }
                }
                // Parse `#[identifier(params = "param1, param2")]`
                Meta(NameValue(m)) if m.path == PARAMS => {
                    if let Ok(p) = parse_lit_into_params(cx, PARAMS, &m.lit) {
                        params.set(&m.path, p);
                    }
                }
                // Any other meta key is unknown; echo its path in the error.
                Meta(meta_item) => {
                    let path = meta_item
                        .path()
                        .into_token_stream()
                        .to_string()
                        .replace(' ', "");
                    cx.error_spanned_by(
                        meta_item.path(),
                        format!("unknown identifier attribute `{}`", path),
                    );
                }
                Lit(lit) => {
                    cx.error_spanned_by(lit, "unexpected literal in identifier attribute");
                }
            }
        }
        // NOTE: `identifier_result` is `Result<&Attribute, ()>`, which is
        // Copy, so re-inspecting it after the `map_or` above is fine.
        // `with` is mandatory whenever the attribute itself was present.
        if identifier_result.is_ok() && with.is_none() {
            cx.error_spanned_by(
                &identifier_result.unwrap().tokens,
                "The `with` attribute is required.",
            );
        }
        Attrs {
            with: with.get(),
            params: params.get(),
        }
    }
    /// The parsed `with = "..."` expression path, if provided.
    pub fn with(&self) -> Option<&syn::ExprPath> {
        self.with.as_ref()
    }
    /// The parsed `params = "..."` expression list, if provided.
    pub fn params(&self) -> Option<&Punctuated<syn::Expr, Comma>> {
        self.params.as_ref()
    }
}
/// Validates that the derive target is a tuple struct with exactly one
/// unnamed `u128` field, i.e. `struct Id(u128);`.
///
/// Violations are reported through `cx`; the function itself never fails.
/// Rewritten with guard-style early returns to replace the original
/// deeply nested `if let` chain and the unidiomatic `return ();`.
fn check_data(cx: &Ctxt, input: &DeriveInput) {
    const ERROR: &str =
        "Only TupleStruct with a single `u128` unnamed field is supported, i.e. `struct Id(u128);`";
    // Enums and unions are rejected outright, with the span pointing at
    // their keyword as in the original.
    let data_struct = match &input.data {
        Data::Struct(data_struct) => data_struct,
        Data::Enum(data_enum) => {
            cx.error_spanned_by(&data_enum.enum_token, ERROR);
            return;
        }
        Data::Union(data_union) => {
            cx.error_spanned_by(&data_union.union_token, ERROR);
            return;
        }
    };
    if let Fields::Unnamed(fields) = &data_struct.fields {
        if fields.unnamed.len() == 1 {
            if let syn::Type::Path(ty_path) = &fields.unnamed.first().unwrap().ty {
                // Shape is correct; only the field type remains to check.
                if !ty_path.path.is_ident("u128") {
                    cx.error_spanned_by(
                        &ty_path.path,
                        "Only `u128` primitive type is supported.",
                    );
                }
                return;
            }
        }
    }
    // Not a single-unnamed-field tuple struct (or a non-path field type).
    cx.error_spanned_by(&data_struct.fields, ERROR);
}
/// Extracts the nested meta items of `attr`, i.e. the pieces inside
/// `#[identifier(...)]`. Non-list shapes and parse failures are reported
/// through `cx` and yield an empty vector.
pub fn get_meta_items(cx: &Ctxt, attr: &syn::Attribute) -> Vec<syn::NestedMeta> {
    match attr.parse_meta() {
        Ok(List(list)) => return list.nested.into_iter().collect(),
        Ok(other) => cx.error_spanned_by(other, ERR_EXPECT_IDENTIFIER),
        Err(err) => cx.syn_error(err),
    }
    Vec::new()
}
/// Shorthand for [`get_lit_str2`] when the attribute name and the
/// meta-item name are the same.
fn get_lit_str<'a>(cx: &Ctxt, attr_name: Symbol, lit: &'a syn::Lit) -> Result<&'a syn::LitStr, ()> {
    get_lit_str2(cx, attr_name, attr_name, lit)
}
/// Narrows `lit` to a string literal, reporting an error through `cx`
/// (naming both the attribute and the offending meta item) when it is
/// any other literal kind.
fn get_lit_str2<'a>(
    cx: &Ctxt,
    attr_name: Symbol,
    meta_item_name: Symbol,
    lit: &'a syn::Lit,
) -> Result<&'a syn::LitStr, ()> {
    match lit {
        syn::Lit::Str(s) => Ok(s),
        other => {
            cx.error_spanned_by(
                other,
                format!(
                    "expected identifier {} attribute to be a string: `{} = \"...\"`",
                    attr_name, meta_item_name
                ),
            );
            Err(())
        }
    }
}
/// Parses a string literal like `"my::module"` into a `syn::ExprPath`,
/// reporting a parse failure through `cx`.
fn parse_lit_into_expr_path(
    cx: &Ctxt,
    attr_name: Symbol,
    lit: &syn::Lit,
) -> Result<syn::ExprPath, ()> {
    let string = get_lit_str(cx, attr_name, lit)?;
    match parse_lit_str(string) {
        Ok(path) => Ok(path),
        Err(_) => {
            cx.error_spanned_by(lit, format!("failed to parse path: {:?}", string.value()));
            Err(())
        }
    }
}
/// Parses a string literal like `"param1, param2"` into a
/// comma-separated list of expressions, reporting a parse failure
/// through `cx`.
///
/// The original ended with `return expr;` — a needless explicit return;
/// the tail expression form is idiomatic.
fn parse_lit_into_params(
    cx: &Ctxt,
    attr_name: Symbol,
    lit: &syn::Lit,
) -> Result<Punctuated<syn::Expr, Comma>, ()> {
    let string = get_lit_str(cx, attr_name, lit)?;
    string
        .parse_with(Punctuated::<syn::Expr, Comma>::parse_terminated)
        .map_err(|_| {
            cx.error_spanned_by(lit, format!("failed to parse params: {:?}", string.value()))
        })
}
/// Parses the contents of a string literal as any `syn`-parseable type,
/// re-spanning the produced tokens onto the literal so errors point at it.
fn parse_lit_str<T>(s: &syn::LitStr) -> parse::Result<T>
where
    T: Parse,
{
    spanned_tokens(s).and_then(syn::parse2)
}
/// Tokenizes the literal's contents, stamping every token with the
/// literal's own span.
fn spanned_tokens(s: &syn::LitStr) -> parse::Result<TokenStream> {
    syn::parse_str(&s.value()).map(|stream| respan_token_stream(stream, s.span()))
}
/// Rebuilds `stream` with every token (recursively) re-spanned to `span`.
fn respan_token_stream(stream: TokenStream, span: Span) -> TokenStream {
    stream.into_iter().map(|tt| respan_token_tree(tt, span)).collect()
}
/// Re-spans a single token; groups are rebuilt so their nested stream is
/// re-spanned as well.
fn respan_token_tree(mut token: TokenTree, span: Span) -> TokenTree {
    match &mut token {
        TokenTree::Group(g) => {
            *g = Group::new(g.delimiter(), respan_token_stream(g.stream(), span));
        }
        _ => {}
    }
    token.set_span(span);
    token
}
| 28.4 | 100 | 0.499171 |
0339d8c3559ca15a554d72cf6b43ddff5dcad804 | 3,305 | #[doc = "Register `reg_key_slot_9_w3` reader"]
pub struct R(crate::R<REG_KEY_SLOT_9_W3_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<REG_KEY_SLOT_9_W3_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::convert::From<crate::R<REG_KEY_SLOT_9_W3_SPEC>> for R {
fn from(reader: crate::R<REG_KEY_SLOT_9_W3_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `reg_key_slot_9_w3` writer"]
pub struct W(crate::W<REG_KEY_SLOT_9_W3_SPEC>);
// Let `W` be used transparently as the generic register writer.
impl core::ops::Deref for W {
    type Target = crate::W<REG_KEY_SLOT_9_W3_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
// Mutable access is needed so field writers can modify the raw bits.
impl core::ops::DerefMut for W {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
// Conversion from the generic writer produced by the register API.
impl core::convert::From<crate::W<REG_KEY_SLOT_9_W3_SPEC>> for W {
    fn from(writer: crate::W<REG_KEY_SLOT_9_W3_SPEC>) -> Self {
        W(writer)
    }
}
#[doc = "Field `reg_key_slot_9_w3` reader - "]
pub struct REG_KEY_SLOT_9_W3_R(crate::FieldReader<u32, u32>);
impl REG_KEY_SLOT_9_W3_R {
    // Constructs the field reader from the raw 32-bit field value.
    pub(crate) fn new(bits: u32) -> Self {
        REG_KEY_SLOT_9_W3_R(crate::FieldReader::new(bits))
    }
}
// Expose the generic `FieldReader` API directly on the field reader.
impl core::ops::Deref for REG_KEY_SLOT_9_W3_R {
    type Target = crate::FieldReader<u32, u32>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `reg_key_slot_9_w3` writer - "]
pub struct REG_KEY_SLOT_9_W3_W<'a> {
    // Borrow of the register writer whose bits this field writer modifies.
    w: &'a mut W,
}
impl<'a> REG_KEY_SLOT_9_W3_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u32) -> &'a mut W {
        // Field spans bits 0:31, so `& !0xffff_ffff` clears the whole word
        // and `& 0xffff_ffff` is a no-op mask; the generator emits the full
        // mask/shift pattern uniformly for fields of any width.
        self.w.bits = (self.w.bits & !0xffff_ffff) | (value as u32 & 0xffff_ffff);
        self.w
    }
}
impl R {
    #[doc = "Bits 0:31"]
    #[inline(always)]
    pub fn reg_key_slot_9_w3(&self) -> REG_KEY_SLOT_9_W3_R {
        // Full-width field: the mask keeps all 32 bits of the register.
        REG_KEY_SLOT_9_W3_R::new((self.bits & 0xffff_ffff) as u32)
    }
}
impl W {
    #[doc = "Bits 0:31"]
    #[inline(always)]
    pub fn reg_key_slot_9_w3(&mut self) -> REG_KEY_SLOT_9_W3_W {
        // Returns the proxy whose `bits()` writes back into this writer.
        REG_KEY_SLOT_9_W3_W { w: self }
    }
    #[doc = "Writes raw bits to the register."]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.0.bits(bits);
        self
    }
}
#[doc = "reg_key_slot_9_w3.\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [reg_key_slot_9_w3](index.html) module"]
pub struct REG_KEY_SLOT_9_W3_SPEC;
// The register is 32 bits wide.
impl crate::RegisterSpec for REG_KEY_SLOT_9_W3_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [reg_key_slot_9_w3::R](R) reader structure"]
impl crate::Readable for REG_KEY_SLOT_9_W3_SPEC {
    type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [reg_key_slot_9_w3::W](W) writer structure"]
impl crate::Writable for REG_KEY_SLOT_9_W3_SPEC {
    type Writer = W;
}
#[doc = "`reset()` method sets reg_key_slot_9_w3 to value 0"]
impl crate::Resettable for REG_KEY_SLOT_9_W3_SPEC {
    #[inline(always)]
    fn reset_value() -> Self::Ux {
        0
    }
}
| 33.05 | 416 | 0.643268 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.